This article collects typical usage examples of the tensorflow.gradients function in Python. If you are wondering exactly what tf.gradients does, how to call it, or how it is used in practice, the curated code samples below should help.
The 20 examples of the gradients function shown below are sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
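Before the examples, here is a minimal sketch of the call itself (not taken from any of the projects below; it assumes TensorFlow 1.x graph mode, which all of the snippets in this article target). tf.gradients(ys, xs) symbolically builds the gradient of the sum of ys with respect to each tensor in xs and returns one gradient tensor per element of xs:

import tensorflow as tf

x = tf.constant(3.0)
y = x ** 2 + 2.0 * x                 # y = x^2 + 2x
dy_dx = tf.gradients(y, [x])[0]      # symbolic gradient: dy/dx = 2x + 2

with tf.Session() as sess:
    print(sess.run(dy_dx))           # 8.0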
Example 1: _build_solvers
def _build_solvers(self, json_data):
    actor_stepsize = 0.001 if (
        self.ACTOR_STEPSIZE_KEY not in json_data) else json_data[self.ACTOR_STEPSIZE_KEY]
    actor_momentum = 0.9 if (
        self.ACTOR_MOMENTUM_KEY not in json_data) else json_data[self.ACTOR_MOMENTUM_KEY]
    critic_stepsize = 0.01 if (
        self.CRITIC_STEPSIZE_KEY not in json_data) else json_data[self.CRITIC_STEPSIZE_KEY]
    critic_momentum = 0.9 if (
        self.CRITIC_MOMENTUM_KEY not in json_data) else json_data[self.CRITIC_MOMENTUM_KEY]

    critic_vars = self._tf_vars('main/critic')
    critic_opt = tf.train.MomentumOptimizer(learning_rate=critic_stepsize,
                                            momentum=critic_momentum)
    self.critic_grad_tf = tf.gradients(self.critic_loss_tf, critic_vars)
    self.critic_solver = MPISolver(self.sess, critic_opt, critic_vars)

    self._actor_stepsize_tf = tf.get_variable(dtype=tf.float32,
                                              name='actor_stepsize',
                                              initializer=actor_stepsize,
                                              trainable=False)
    self._actor_stepsize_ph = tf.get_variable(dtype=tf.float32, name='actor_stepsize_ph', shape=[])
    self._actor_stepsize_update_op = self._actor_stepsize_tf.assign(self._actor_stepsize_ph)

    actor_vars = self._tf_vars('main/actor')
    actor_opt = tf.train.MomentumOptimizer(learning_rate=self._actor_stepsize_tf,
                                           momentum=actor_momentum)
    self.actor_grad_tf = tf.gradients(self.actor_loss_tf, actor_vars)
    self.actor_solver = MPISolver(self.sess, actor_opt, actor_vars)
    return
Developer: bulletphysics, Project: bullet3, Lines of code: 30, Source file: ppo_agent.py
Example 2: build_model
def build_model(self):
    self.x = tf.placeholder(tf.float32, [self.reader.vocab_size], name="input")
    self.x_idx = tf.placeholder(tf.int32, [None], name='x_idx')  # mask paddings

    self.build_encoder()
    self.build_generator()

    self.objective = self.kl + self.recons_loss

    # optimizer for alternative update
    optimizer1 = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
    optimizer2 = tf.train.AdamOptimizer(learning_rate=0.1)

    fullvars = tf.GraphKeys.TRAINABLE_VARIABLES
    print 'fullvars:', fullvars
    enc_vars = tf.get_collection(fullvars, scope='encoder')
    print enc_vars
    dec_vars = tf.get_collection(fullvars, scope='generator')
    print dec_vars

    self.lossL2_enc = tf.add_n([tf.nn.l2_loss(v) for v in enc_vars if 'bias' not in v.name]) * 0.0001
    self.lossL2_dec = tf.add_n([tf.nn.l2_loss(v) for v in dec_vars if 'bias' not in v.name])
    print 'lossL2_enc:', self.lossL2_enc
    print 'lossL2_dec:', self.lossL2_dec

    enc_grads = tf.gradients(self.kl + self.lossL2_enc, enc_vars)
    dec_grads = tf.gradients(self.recons_loss + self.lossL2_dec, dec_vars)

    self.optim_enc = optimizer1.apply_gradients(zip(enc_grads, enc_vars))
    self.optim_dec = optimizer2.apply_gradients(zip(dec_grads, dec_vars))
Developer: wujsAct, Project: TeachingMachineReadAndComprehend, Lines of code: 31, Source file: nvdm.py
Example 3: testGradQtm1
def testGradQtm1(self):
    with self.test_session() as sess:
        gradients = tf.gradients([-self.qlearning.loss], [self.q_tm1])
        gradients_reference = tf.gradients([-self.qlearning_reference.loss],
                                           [self.q_tm1])
        self.assertAllClose(sess.run(gradients[0]),
                            sess.run(gradients_reference[0]))
Developer: wmiao1769, Project: trfl, Lines of code: 7, Source file: action_value_ops_test.py
Example 4: test_second_order
def test_second_order(self):
    with self.test_session() as sess:
        x = Normal(0.0, 1.0)
        y = 2 * (x ** 2)
        z = tf.gradients(y, x)[0]
        z = tf.gradients(z, x)[0]
        self.assertEqual(z.eval(), 4.0)
Developer: JoyceYa, Project: edward, Lines of code: 7, Source file: random_variable_gradients_test.py
Example 5: testCustomGetter
def testCustomGetter(self):
    custom_getter = snt.custom_getters.Context(snt.custom_getters.stop_gradient)
    module = snt.nets.ConvNet2D(output_channels=self.output_channels,
                                kernel_shapes=self.kernel_shapes,
                                rates=self.rates,
                                strides=self.strides,
                                paddings=self.paddings,
                                custom_getter=custom_getter)

    input_shape = [10, 100, 100, 3]
    input_to_net = tf.random_normal(dtype=tf.float32, shape=input_shape)

    if tf.executing_eagerly():
        with tf.GradientTape() as tape0:
            out0 = module(input_to_net)
        with tf.GradientTape() as tape1:
            with custom_getter:
                out1 = module(input_to_net)
        all_vars = tf.trainable_variables()
        out0_grads = tape0.gradient(out0, all_vars)
        out1_grads = tape1.gradient(out1, all_vars)
    else:
        out0 = module(input_to_net)
        with custom_getter:
            out1 = module(input_to_net)
        all_vars = tf.trainable_variables()
        out0_grads = tf.gradients(out0, all_vars)
        out1_grads = tf.gradients(out1, all_vars)

    for grad in out0_grads:
        self.assertNotEqual(None, grad)
    self.assertEqual([None] * len(out1_grads), out1_grads)
Developer: ccchang0111, Project: sonnet, Lines of code: 33, Source file: convnet_test.py
Example 6: test_grads_at_sample_pts_with_yes_preserve_gradients
def test_grads_at_sample_pts_with_yes_preserve_gradients(self):
    dist = tfp.distributions.Normal(np.float64(0), np.float64(1))
    x = dist.sample(10001, seed=0)
    # The 50th quantile will lie exactly on a data point;
    # 49.123... will not.
    q = tf.constant(np.array([50, 49.123456789]))  # Percentiles, in [0, 100]
    analytic_pct = dist.quantile(q / 100.)  # divide by 100 to get a quantile.
    sample_pct = tfp.stats.percentile(
        x, q, interpolation='linear', preserve_gradients=True)
    analytic_pct, d_analytic_pct_dq, sample_pct, d_sample_pct_dq = (
        self.evaluate([
            analytic_pct,
            tf.gradients(analytic_pct, q)[0],
            sample_pct,
            tf.gradients(sample_pct, q)[0],
        ]))

    self.assertAllClose(analytic_pct, sample_pct, atol=0.05)

    # Near the median, the normal PDF is approximately a constant C, with
    # C = 1 / sqrt(2 * pi). So the CDF is approximately F(x) = C * x, and
    # the quantile function is approximately F^{-1}(y) = y / C.
    self.assertAllClose(np.sqrt(2 * np.pi) / 100 * np.ones([2]),
                        d_analytic_pct_dq, atol=1e-4)

    # At the 50th percentile exactly, the sample gradient is not exactly zero.
    # This is because preserve_gradients == True.
    self.assertNotEqual(0., d_sample_pct_dq[0])

    # The relative tolerance is terrible (2x), but this is a sample-quantile
    # based gradient.
    self.assertAllClose(d_analytic_pct_dq, d_sample_pct_dq, atol=0, rtol=2)
    # The absolute values are close, though (and tiny).
    self.assertAllClose(d_analytic_pct_dq, d_sample_pct_dq, atol=0.1, rtol=0)
Developer: asudomoeva, Project: probability, Lines of code: 35, Source file: quantiles_test.py
Example 7: config
def config(self,
           loss,
           train_batch_size,
           learning_rate=0.5,
           momentum=0.9,
           permute=False,
           ecrit=0.01,
           test_func=None,
           wrange=None):
    self._loss = loss(self.model['labels'], self.model['network'][-1].act)
    self._opt = tf.train.MomentumOptimizer(learning_rate, momentum)

    self.settings['loss_func'] = loss
    self.settings['batch_size'] = train_batch_size
    self.settings['lrate'] = learning_rate
    self.settings['mrate'] = momentum
    self.settings['permute'] = permute
    self.settings['ecrit'] = ecrit
    self.settings['test_func'] = test_func
    self.settings['opt_task'] = self._opt.minimize(self._loss, global_step=self._global_step)
    self.settings['saver'] = tf.train.Saver(max_to_keep=0)

    for l in self.model['network']:
        # When run in current session tf.gradients returns a list of numpy arrays with
        # batch_size number of rows and Layer.size number of columns.
        if wrange is not None:
            with self.sess.as_default():
                l.assign_weights(wrange)
        l.ded_net = tf.gradients(self._loss, l.net)
        l.ded_act = tf.gradients(self._loss, l.act)
        l.ded_W = tf.gradients(self._loss, l.W)
        l.ded_b = tf.gradients(self._loss, l.b)

    init = init_rest()
    self.sess.run(init)
Developer: alex-ten, Project: PDP, Lines of code: 34, Source file: Network.py
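The comment in Example 7 notes that gradients taken with respect to per-example tensors (layer net inputs and activations) come back with one row per example, while gradients with respect to weights have the weights' own shape. The following is a small standalone sketch of that behavior, assuming TF 1.x; the tensor names here are invented for illustration and are not part of the PDP project:

import numpy as np
import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 4])      # a batch of inputs
w = tf.get_variable('w_demo', shape=[4, 3])
net = tf.matmul(x, w)                          # per-example pre-activations
loss = tf.reduce_mean(tf.square(net))

d_net, d_w = tf.gradients(loss, [net, w])      # one gradient tensor per requested input

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    g_net, g_w = sess.run([d_net, d_w], {x: np.ones((5, 4), np.float32)})
    print(g_net.shape)   # (5, 3): one row per example in the batch
    print(g_w.shape)     # (4, 3): same shape as the variable, accumulated over the batch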
Example 8: rothk_penalty
def rothk_penalty(self, d_real, d_fake):
    config = self.config
    g_sample = self.gan.uniform_sample
    x = self.gan.inputs.x
    gradx = tf.gradients(d_real, [x])[0]
    gradg = tf.gradients(d_fake, [g_sample])[0]
    gradx = tf.reshape(gradx, [self.ops.shape(gradx)[0], -1])
    gradg = tf.reshape(gradg, [self.ops.shape(gradg)[0], -1])
    gradx_norm = tf.norm(gradx, axis=1, keep_dims=True)
    gradg_norm = tf.norm(gradg, axis=1, keep_dims=True)
    if int(gradx_norm.get_shape()[0]) != int(d_real.get_shape()[0]):
        print("Condensing along batch for rothk")
        gradx_norm = tf.reduce_mean(gradx_norm, axis=0)
        gradg_norm = tf.reduce_mean(gradg_norm, axis=0)
    gradx = tf.square(gradx_norm) * tf.square(1 - tf.nn.sigmoid(d_real))
    gradg = tf.square(gradg_norm) * tf.square(tf.nn.sigmoid(d_fake))
    loss = gradx + gradg
    loss *= config.rothk_lambda or 1
    if config.rothk_decay:
        decay_function = config.decay_function or tf.train.exponential_decay
        decay_steps = config.decay_steps or 50000
        decay_rate = config.decay_rate or 0.9
        decay_staircase = config.decay_staircase or False
        global_step = tf.train.get_global_step()
        loss = decay_function(loss, global_step, decay_steps, decay_rate, decay_staircase)
    return loss
Developer: 255BITS, Project: hyperchamber-gan, Lines of code: 27, Source file: base_loss.py
Example 9: apply_gradients
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
    var_list = [v for _, v in grads_and_vars]
    d_vars = []
    g_vars = []
    all_grads = [g for g, _ in grads_and_vars]
    for grad, var in grads_and_vars:
        if var in self.gan.d_vars():
            d_vars += [var]
        elif var in self.gan.g_vars():
            g_vars += [var]
        else:
            raise ValueError("Couldn't find var in g_vars or d_vars")

    with ops.init_scope():
        self.optimizer._create_slots([v for g, v in grads_and_vars])
    self._prepare()

    d_grads = all_grads[:len(d_vars)]
    if self.config.type == 'sga':
        Jgrads = tf.gradients(d_grads, d_vars, grad_ys=d_grads,
                              stop_gradients=d_vars) + [tf.zeros_like(g) for g in g_vars]
    elif self.config.type == 'magnitude':
        consensus_reg = [tf.square(g) for g in d_grads if g is not None]
        Jgrads = tf.gradients(consensus_reg, d_vars) + [tf.zeros_like(g) for g in g_vars]
    else:
        consensus_reg = 0.5 * sum(
            tf.reduce_sum(tf.square(g)) for g in d_grads if g is not None
        )
        Jgrads = tf.gradients(consensus_reg, d_vars,
                              stop_gradients=d_vars) + [tf.zeros_like(g) for g in g_vars]

    new_grads = [g + jg * self._beta if jg is not None else g
                 for g, v, jg in zip(all_grads, var_list, Jgrads)]
    new_grads_and_vars = list(zip(new_grads, var_list)).copy()
    return self.optimizer.apply_gradients(new_grads_and_vars,
                                          global_step=global_step, name=name)
Developer: 255BITS, Project: hyperchamber-gan, Lines of code: 31, Source file: consensus_optimizer.py
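Example 9 leans on two of the less common tf.gradients arguments: grad_ys seeds the backward pass with the given tensors instead of ones (turning the call into a weighted vector-Jacobian product), and stop_gradients holds the listed tensors constant for that particular call. Below is a standalone sketch of both, assuming TF 1.x, with toy values unrelated to the GAN code above (the stop_gradients case is adapted from the tf.gradients documentation):

import tensorflow as tf

x = tf.constant(2.0)
y = x * x                      # dy/dx = 2x = 4
z = 3.0 * x                    # dz/dx = 3

# grad_ys weights each y before backpropagating: 10 * dy/dx + 100 * dz/dx = 340
weighted = tf.gradients([y, z], [x],
                        grad_ys=[tf.constant(10.0), tf.constant(100.0)])[0]

# stop_gradients: b depends on a, but listing both makes each one a leaf,
# so d(a + b)/da is 1 rather than 3.
a = tf.constant(0.0)
b = 2.0 * a
stopped = tf.gradients(a + b, [a, b], stop_gradients=[a, b])

with tf.Session() as sess:
    print(sess.run(weighted))   # 340.0
    print(sess.run(stopped))    # [1.0, 1.0]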
Example 10: test_state_grads
def test_state_grads(sess):
    v = tf.Variable([0., 0., 0.])
    x = tf.ones((3,))

    y0 = tf.assign(v, x)
    y1 = tf.assign_add(v, x)

    grad0 = tf.gradients(y0, [v, x])
    grad1 = tf.gradients(y1, [v, x])

    grad_vals = sess.run((grad0, grad1))

    assert np.allclose(grad_vals[0][0], 0)
    assert np.allclose(grad_vals[0][1], 1)
    assert np.allclose(grad_vals[1][0], 1)
    assert np.allclose(grad_vals[1][1], 1)

    v = tf.Variable([0., 0., 0.])
    x = tf.ones((1,))

    y0 = tf.scatter_update(v, [0], x)
    y1 = tf.scatter_add(v, [0], x)

    grad0 = tf.gradients(y0, [v._ref(), x])
    grad1 = tf.gradients(y1, [v._ref(), x])

    grad_vals = sess.run((grad0, grad1))

    assert np.allclose(grad_vals[0][0], [0, 1, 1])
    assert np.allclose(grad_vals[0][1], 1)
    assert np.allclose(grad_vals[1][0], 1)
    assert np.allclose(grad_vals[1][1], 1)
Developer: nengo, Project: nengo_deeplearning, Lines of code: 31, Source file: test_tensorflow_patch.py
Example 11: testTotalLossGradients
def testTotalLossGradients(self, is_multi_actions):
    with self.test_session() as sess:
        if is_multi_actions:
            total_loss = tf.reduce_sum(self.multi_op.loss)
            policy_logits_nest = self.multi_policy_logits
        else:
            total_loss = tf.reduce_sum(self.op.loss)
            policy_logits_nest = self.policy_logits

        grad_policy_list = [
            tf.gradients(total_loss, policy_logits)[0]
            for policy_logits in nest.flatten(policy_logits_nest)]
        grad_baseline = tf.gradients(total_loss, self.baseline_values)[0]

        for grad_policy in grad_policy_list:
            self.assertEqual(grad_policy.get_shape(), tf.TensorShape([2, 1, 3]))
            # These values were just generated once and hard-coded here to check for
            # regressions. Calculating by hand would be too time-consuming,
            # error-prone and unreadable.
            self.assertAllClose(sess.run(grad_policy),
                                [[[-0.5995, 0.1224, 0.4770]],
                                 [[0.0288, -0.0576, 0.0288]]],
                                atol=1e-4)

        self.assertEqual(grad_baseline.get_shape(), tf.TensorShape([2, 1]))
        self.assertAllClose(sess.run(grad_baseline), [[-0.1083], [-0.0420]],
                            atol=1e-4)

        self.assertAllEqual(tf.gradients(total_loss, self.invalid_grad_inputs),
                            self.invalid_grad_outputs)
Developer: wmiao1769, Project: trfl, Lines of code: 29, Source file: discrete_policy_gradient_ops_test.py
Example 12: testBaselineGradients
def testBaselineGradients(self):
    loss = self.op.extra.baseline_loss
    grad_baseline = tf.gradients(loss, self.baseline_values)[0]
    self.assertEqual(grad_baseline.get_shape(), tf.TensorShape([2, 1]))
    self.assertAllEqual(tf.gradients(loss, self.policy_logits), [None])
    self.assertAllEqual(tf.gradients(loss, self.invalid_grad_inputs),
                        self.invalid_grad_outputs)
Developer: wmiao1769, Project: trfl, Lines of code: 7, Source file: discrete_policy_gradient_ops_test.py
Example 13: leapfrogs
def leapfrogs(self, z, T, friction, step_size, x):
    v_0 = tf.random_normal((self.n_particles, self.batch_size, self.n_z), 0, 1, dtype=tf.float32)

    log_p = self._log_likelihood(x, self._generator_network(z, self.network_weights['decoder_weights'], self.network_weights['decoder_biases'])) + self._log_p_z(z)
    grad = -tf.gradients(log_p, [z])[0]
    v = v_0 - ((.5 * step_size) * grad)
    z = z + (step_size * v)

    for t in range(T - 1):
        log_p = self._log_likelihood(x, self._generator_network(z, self.network_weights['decoder_weights'], self.network_weights['decoder_biases'])) + self._log_p_z(z)
        grad = -tf.gradients(log_p, [z])[0]
        v = v - (step_size * grad)
        z = z + (step_size * v)
        v = friction * v

    log_p = self._log_likelihood(x, self._generator_network(z, self.network_weights['decoder_weights'], self.network_weights['decoder_biases'])) + self._log_p_z(z)
    grad = -tf.gradients(log_p, [z])[0]
    v = v - ((.5 * step_size) * grad)

    return z, v_0, v
Developer: chriscremer, Project: Other_Code, Lines of code: 27, Source file: HIAE.py
Example 14: testNanFromGradsDontPropagate
def testNanFromGradsDontPropagate(self):
    """Test that update with NaN gradients does not cause NaN in results."""
    def _nan_log_prob_with_nan_gradient(x):
        return np.nan * tf.reduce_sum(x)

    initial_x = tf.linspace(0.01, 5, 10)
    hmc = tfp.mcmc.HamiltonianMonteCarlo(
        target_log_prob_fn=_nan_log_prob_with_nan_gradient,
        step_size=2.,
        num_leapfrog_steps=5,
        seed=_set_seed(47))
    updated_x, kernel_results = hmc.one_step(
        current_state=initial_x,
        previous_kernel_results=hmc.bootstrap_results(initial_x))
    initial_x_, updated_x_, log_accept_ratio_ = self.evaluate(
        [initial_x, updated_x, kernel_results.log_accept_ratio])
    acceptance_probs = np.exp(np.minimum(log_accept_ratio_, 0.))

    tf.logging.vlog(1, 'initial_x = {}'.format(initial_x_))
    tf.logging.vlog(1, 'updated_x = {}'.format(updated_x_))
    tf.logging.vlog(1, 'log_accept_ratio = {}'.format(log_accept_ratio_))

    self.assertAllEqual(initial_x_, updated_x_)
    self.assertEqual(acceptance_probs, 0.)

    self.assertAllFinite(
        self.evaluate(tf.gradients(updated_x, initial_x)[0]))
    self.assertAllEqual(
        [True],
        [g is None for g in tf.gradients(
            kernel_results.proposed_results.grads_target_log_prob,
            initial_x)])
Developer: asudomoeva, Project: probability, Lines of code: 32, Source file: hmc_test.py
Example 15: get_dynamic_rebar_gradient
def get_dynamic_rebar_gradient(self):
    """Get the dynamic rebar gradient (t, eta optimized)."""
    tiled_pre_temperature = tf.tile([self.pre_temperature_variable],
                                    [self.batch_size])
    temperature = tf.exp(tiled_pre_temperature)

    hardELBO, nvil_gradient, logQHard = self._create_hard_elbo()
    if self.hparams.quadratic:
        gumbel_cv, extra = self._create_gumbel_control_variate_quadratic(logQHard, temperature=temperature)
    else:
        gumbel_cv, extra = self._create_gumbel_control_variate(logQHard, temperature=temperature)

    f_grads = self.optimizer_class.compute_gradients(tf.reduce_mean(-nvil_gradient))

    eta = {}
    h_grads, eta_statistics = self.multiply_by_eta_per_layer(
        self.optimizer_class.compute_gradients(tf.reduce_mean(gumbel_cv)),
        eta)

    model_grads = U.add_grads_and_vars(f_grads, h_grads)
    total_grads = model_grads

    # Construct the variance objective
    g = U.vectorize(model_grads, set_none_to_zero=True)
    self.maintain_ema_ops.append(self.ema.apply([g]))
    gbar = 0  # tf.stop_gradient(self.ema.average(g))
    variance_objective = tf.reduce_mean(tf.square(g - gbar))

    reinf_g_t = 0
    if self.hparams.quadratic:
        for layer in xrange(self.hparams.n_layer):
            gumbel_learning_signal, _ = extra[layer]
            df_dt = tf.gradients(gumbel_learning_signal, tiled_pre_temperature)[0]
            reinf_g_t_i, _ = self.multiply_by_eta_per_layer(
                self.optimizer_class.compute_gradients(tf.reduce_mean(tf.stop_gradient(df_dt) * logQHard[layer])),
                eta)
            reinf_g_t += U.vectorize(reinf_g_t_i, set_none_to_zero=True)

        reparam = tf.add_n([reparam_i for _, reparam_i in extra])
    else:
        gumbel_learning_signal, reparam = extra
        df_dt = tf.gradients(gumbel_learning_signal, tiled_pre_temperature)[0]
        reinf_g_t, _ = self.multiply_by_eta_per_layer(
            self.optimizer_class.compute_gradients(tf.reduce_mean(tf.stop_gradient(df_dt) * tf.add_n(logQHard))),
            eta)
        reinf_g_t = U.vectorize(reinf_g_t, set_none_to_zero=True)

    reparam_g, _ = self.multiply_by_eta_per_layer(
        self.optimizer_class.compute_gradients(tf.reduce_mean(reparam)),
        eta)
    reparam_g = U.vectorize(reparam_g, set_none_to_zero=True)
    reparam_g_t = tf.gradients(tf.reduce_mean(2 * tf.stop_gradient(g - gbar) * reparam_g), self.pre_temperature_variable)[0]

    variance_objective_grad = tf.reduce_mean(2 * (g - gbar) * reinf_g_t) + reparam_g_t

    debug = {'ELBO': hardELBO,
             'etas': eta_statistics,
             'variance_objective': variance_objective,
             }
    return total_grads, debug, variance_objective, variance_objective_grad
Developer: ALISCIFP, Project: models, Lines of code: 60, Source file: rebar.py
Example 16: leapfrog_step
def leapfrog_step(x0,
                  v0,
                  log_posterior,
                  step_size,
                  num_steps):
    # Start by updating the velocity a half-step
    v = v0 - 0.5 * step_size * tf.gradients(log_posterior(x0), x0)[0]

    # Initialize x to be the first step
    x = x0 + step_size * v

    for i in xrange(num_steps):
        # Compute gradient of the log-posterior with respect to x
        gradient = tf.gradients(log_posterior(x), x)[0]

        # Update velocity
        v = v - step_size * gradient

        # Update x
        x = x + step_size * v

    # Do a final update of the velocity for a half step
    v = v - 0.5 * step_size * tf.gradients(log_posterior(x), x)[0]

    # return new proposal state
    return x, v
Developer: arahuja, Project: hamiltonian-monte-carlo, Lines of code: 27, Source file: hamiltonian_monte_carlo.py
Example 17: test_gradient
def test_gradient(self):
    """
    Test the correctness of the gradient against tensorflow
    """
    if ovl.cuda_enabled:
        devices = ['/cpu:0', '/gpu:0']
    else:
        devices = ['/cpu:0']

    # ensure TF runs on GPU when asked
    test_config = tf.ConfigProto(allow_soft_placement=False)
    test_config.graph_options.optimizer_options.opt_level = -1

    with tf.Session(config=test_config) as sess:
        for dev_string in devices:
            with tf.device(dev_string):
                a = np.random.random(100)
                grad_input = tf.constant(np.random.random(100))
                arg = tf.constant(a)
                ovl_op = log1p(arg)
                ones = tf.constant(np.ones_like(a))
                ovl_out = ovl.as_tensorflow(ovl_op)
                tf_out = tf.log(arg + ones)

                ovl_grad = tf.gradients(ovl_out, arg, grad_input)[0]
                tf_grad = tf.gradients(tf_out, arg, grad_input)[0]
                ovl_out, tf_out, ovl_grad, tf_grad = sess.run([ovl_out, tf_out, ovl_grad, tf_grad])

                assert np.allclose(ovl_out, tf_out)
                assert np.allclose(ovl_grad, tf_grad)
        sess.close()
Developer: hewlettpackardlabs, Project: opveclib, Lines of code: 29, Source file: test_log1p.py
Example 18: testLSTMBasicToBlockPeeping
def testLSTMBasicToBlockPeeping(self):
    with self.test_session(use_gpu=self._use_gpu) as sess:
        batch_size = 2
        input_size = 3
        cell_size = 4
        sequence_length = 5

        inputs = []
        for _ in range(sequence_length):
            inp = tf.convert_to_tensor(
                np.random.randn(batch_size, input_size),
                dtype=tf.float32)
            inputs.append(inp)

        initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=19890212)
        with tf.variable_scope("basic", initializer=initializer):
            cell = tf.nn.rnn_cell.LSTMCell(cell_size,
                                           use_peepholes=True,
                                           state_is_tuple=True)
            outputs, _ = tf.nn.rnn(cell, inputs, dtype=tf.float32)

            sess.run([tf.initialize_all_variables()])
            basic_outputs = sess.run(outputs)
            basic_grads = sess.run(tf.gradients(outputs, inputs))
            basic_wgrads = sess.run(tf.gradients(outputs, tf.trainable_variables()))

        with tf.variable_scope("block", initializer=initializer):
            w = tf.get_variable("w",
                                shape=[input_size + cell_size, cell_size * 4],
                                dtype=tf.float32)
            b = tf.get_variable("b",
                                shape=[cell_size * 4],
                                dtype=tf.float32,
                                initializer=tf.zeros_initializer)
            wci = tf.get_variable("wci", shape=[cell_size], dtype=tf.float32)
            wcf = tf.get_variable("wcf", shape=[cell_size], dtype=tf.float32)
            wco = tf.get_variable("wco", shape=[cell_size], dtype=tf.float32)

            _, _, _, _, _, _, outputs = fused_lstm(
                tf.convert_to_tensor(sequence_length,
                                     dtype=tf.int64),
                inputs,
                w,
                b,
                wci=wci,
                wcf=wcf,
                wco=wco,
                cell_clip=0,
                use_peephole=True)

            sess.run([tf.initialize_all_variables()])
            block_outputs = sess.run(outputs)
            block_grads = sess.run(tf.gradients(outputs, inputs))
            block_wgrads = sess.run(tf.gradients(outputs, [w, b, wci, wcf, wco]))

        self.assertAllClose(basic_outputs, block_outputs)
        self.assertAllClose(basic_grads, block_grads)
        for basic, block in zip(basic_wgrads, block_wgrads):
            self.assertAllClose(basic, block, rtol=1e-2, atol=1e-2)
Developer: 10imaging, Project: tensorflow, Lines of code: 60, Source file: lstm_ops_test.py
Example 19: testReduction
def testReduction(self):
    g = tf.Graph()

    # BN0 is computing batch normed matrix along rows.
    def BN0(x):
        mean = tf.reduce_mean(x, [0])
        var = tf.reduce_mean(tf.square(x - mean))  # biased var
        rstd = tf.rsqrt(var + 1e-8)
        return (x - mean) * rstd

    # Wraps BatchNorm in a tf function.
    @function.Defun(tf.float32)
    def BN1(x):
        return BN0(x)

    with g.as_default():
        x = tf.placeholder(tf.float32)
        y0 = BN0(x)  # A plain graph
        y1 = BN1(x)  # A tf function
        dx0, = tf.gradients([y0], [x])
        dx1, = tf.gradients([y1], [x])

    # Both should produce the same result and gradient.
    with self.test_session(graph=g) as sess:
        vals = sess.run([y0, y1, dx0, dx1], {x: np.random.uniform(size=(3, 7))})
        self.assertAllClose(vals[0], vals[1])
        self.assertAllClose(vals[2], vals[3])
Developer: BloodD, Project: tensorflow, Lines of code: 27, Source file: function_test.py
Example 20: main
def main():
    x_data = np.asarray([2])
    t_data = np.asarray([3])

    W = tf.constant([3.0])
    y = W * x_data
    z = y * t_data

    # Before starting, initialize the variables. We will 'run' this first.
    init = tf.initialize_all_variables()

    # Launch the graph.
    sess = tf.Session()
    sess.run(init)

    print sess.run(y)
    print sess.run(z)

    gd1 = tf.gradients(y, W)
    print sess.run(gd1)

    gd2 = tf.gradients(z, y, gd1)
    print sess.run(gd2)

    gd = tf.gradients(z, W)
    print sess.run(gd)
    return
Developer: yinchuandong, Project: python_algorithm, Lines of code: 28, Source file: test_tf.py
Note: the tensorflow.gradients examples in this article were compiled by 纯净天空 from source-code and documentation hosting platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and redistribution and use are subject to the License of the corresponding project. Please do not reproduce without permission.