This article collects typical usage examples of the Python function tensorflow.python.ops.gradients_impl.gradients. If you are wondering what the gradients function does, how to call it, or want to see it used in context, the curated code samples below should help.
The following shows 20 code examples of the gradients function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python samples.
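Before diving in, here is a minimal, self-contained sketch of the call the examples below exercise (a sketch assuming TensorFlow 1.x graph mode; tf.gradients is the public alias of gradients_impl.gradients):

import tensorflow as tf  # TF 1.x graph mode assumed

x = tf.placeholder(tf.float32, shape=[3])
y = tf.reduce_sum(x * x)               # y = sum(x_i^2)
dy_dx, = tf.gradients(ys=[y], xs=[x])  # symbolic reverse-mode gradient: 2x

with tf.Session() as sess:
  print(sess.run(dy_dx, {x: [1., 2., 3.]}))  # -> [2. 4. 6.]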
Example 1: testNanFromGradsDontPropagate
def testNanFromGradsDontPropagate(self):
  """Test that update with NaN gradients does not cause NaN in results."""
  def _nan_log_prob_with_nan_gradient(x):
    return np.nan * math_ops.reduce_sum(x)

  with self.test_session() as sess:
    initial_x = math_ops.linspace(0.01, 5, 10)
    updated_x, acceptance_probs, new_log_prob, new_grad = hmc.kernel(
        2., 5, initial_x, _nan_log_prob_with_nan_gradient, [0])
    initial_x_val, updated_x_val, acceptance_probs_val = sess.run(
        [initial_x, updated_x, acceptance_probs])
    logging.vlog(1, 'initial_x = {}'.format(initial_x_val))
    logging.vlog(1, 'updated_x = {}'.format(updated_x_val))
    logging.vlog(1, 'acceptance_probs = {}'.format(acceptance_probs_val))
    self.assertAllEqual(initial_x_val, updated_x_val)
    self.assertEqual(acceptance_probs_val, 0.)
    self.assertAllFinite(
        gradients_impl.gradients(updated_x, initial_x)[0].eval())
    self.assertTrue(
        gradients_impl.gradients(new_grad, initial_x)[0] is None)
    # Gradients of the acceptance probs and new log prob are not finite.
    _ = new_log_prob  # Prevent unused arg error.
Author: AbhinavJain13, Project: tensorflow, Lines: 26, Source: hmc_test.py
Example 2: testReduction
def testReduction(self):
  g = ops.Graph()

  # BN0 is computing batch normed matrix along rows.
  def BN0(x):
    mean = math_ops.reduce_mean(x, [0])
    var = math_ops.reduce_mean(math_ops.square(x - mean))  # biased var
    rstd = math_ops.rsqrt(var + 1e-8)
    return (x - mean) * rstd

  # Wraps BatchNorm in a tf function.
  @function.Defun(dtypes.float32)
  def BN1(x):
    return BN0(x)

  with g.as_default():
    x = array_ops.placeholder(dtypes.float32)
    y0 = BN0(x)  # A plain graph
    y1 = BN1(x)  # A tf function
    dx0, = gradients_impl.gradients([y0], [x])
    dx1, = gradients_impl.gradients([y1], [x])

  # Both should produce the same result and gradient.
  with self.test_session(graph=g) as sess:
    vals = sess.run([y0, y1, dx0, dx1], {x: np.random.uniform(size=(3, 7))})
    self.assertAllClose(vals[0], vals[1])
    self.assertAllClose(vals[2], vals[3])
Author: AbhinavJain13, Project: tensorflow, Lines: 27, Source: function_test.py
Example 3: body
def body(it, cost):
  embedding = embedding_ops.embedding_lookup(embedding_matrix, [0])
  cost = control_flow_ops.cond(
      math_ops.equal(it, 3), lambda: math_ops.square(cost),
      (lambda: cost + math_ops.reduce_sum(embedding)))
  return it + 1, cost

_, cost = control_flow_ops.while_loop(
    cond, body, [constant_op.constant(0),
                 constant_op.constant(0.0)])
dynamic_grads = gradients_impl.gradients(cost, [embedding_matrix])[0]
dynamic_grads = math_ops.segment_sum(dynamic_grads.values,
                                     dynamic_grads.indices)

embedding = embedding_ops.embedding_lookup(embedding_matrix, [0])
static = math_ops.square(
    math_ops.reduce_sum(embedding) + math_ops.reduce_sum(embedding) +
    math_ops.reduce_sum(embedding)) + math_ops.reduce_sum(embedding)
static_grads = gradients_impl.gradients(static, [embedding_matrix])[0]
static_grads = math_ops.segment_sum(static_grads.values,
                                    static_grads.indices)

with self.cached_session():
  self.evaluate(variables.global_variables_initializer())
  self.assertAllEqual(*self.evaluate([static_grads, dynamic_grads]))
Author: Wajih-O, Project: tensorflow, Lines: 26, Source: control_flow_ops_test.py
Example 4: testShapePassedToGradient
def testShapePassedToGradient(self):
  with ops.Graph().as_default():
    @custom_gradient.custom_gradient
    def differentiable_scatter_update(handle, indices, values):
      with ops.control_dependencies([
          resource_variable_ops.resource_scatter_update(
              handle, indices, values)]):
        new_handle = array_ops.identity(handle)

      def grad(dresult):
        self.assertIsNotNone(
            tensor_util.constant_value(dresult.dense_shape))
        return [dresult, None, None]

      return new_handle, grad

    var = variable_scope.get_variable(
        "foo", shape=[20], initializer=init_ops.zeros_initializer,
        dtype=dtypes.float64, use_resource=True)
    indices = math_ops.range(10)
    updates = math_ops.range(9, -1, -1, dtype=dtypes.float64)
    new_handle = differentiable_scatter_update(var.handle, indices, updates)
    gathered = resource_variable_ops.resource_gather(
        new_handle, indices, dtype=var.dtype)
    gradients_impl.gradients([gathered], [updates])
Author: Wajih-O, Project: tensorflow, Lines: 26, Source: resource_variable_ops_test.py
Example 5: testNanFromGradsDontPropagate
def testNanFromGradsDontPropagate(self):
  """Test that update with NaN gradients does not cause NaN in results."""
  def _nan_log_prob_with_nan_gradient(x):
    return np.nan * math_ops.reduce_sum(x)

  with self.test_session() as sess:
    initial_x = math_ops.linspace(0.01, 5, 10)
    updated_x, kernel_results = hmc.kernel(
        target_log_prob_fn=_nan_log_prob_with_nan_gradient,
        current_state=initial_x,
        step_size=2.,
        num_leapfrog_steps=5,
        seed=47)
    initial_x_, updated_x_, acceptance_probs_ = sess.run(
        [initial_x, updated_x, kernel_results.acceptance_probs])
    logging_ops.vlog(1, "initial_x = {}".format(initial_x_))
    logging_ops.vlog(1, "updated_x = {}".format(updated_x_))
    logging_ops.vlog(1, "acceptance_probs = {}".format(acceptance_probs_))
    self.assertAllEqual(initial_x_, updated_x_)
    self.assertEqual(acceptance_probs_, 0.)
    self.assertAllFinite(
        gradients_ops.gradients(updated_x, initial_x)[0].eval())
    self.assertAllEqual([True], [g is None for g in gradients_ops.gradients(
        kernel_results.proposed_grads_target_log_prob, initial_x)])
    self.assertAllEqual([False], [g is None for g in gradients_ops.gradients(
        kernel_results.proposed_grads_target_log_prob,
        kernel_results.proposed_state)])
Author: ClowJ, Project: tensorflow, Lines: 30, Source: hmc_test.py
Example 6: testWithIsRecomputeKwarg
def testWithIsRecomputeKwarg(self):
  kwarg_values = []

  @rev_block_lib.recompute_grad
  def layer_with_recompute(inputs, is_recomputing=False):
    kwarg_values.append(is_recomputing)
    out = core_layers.dense(inputs, 2)
    out = normalization_layers.batch_normalization(out, training=True)
    if is_recomputing:
      # Ensure that the updates are not duplicated by popping off the latest
      # 2 additions.
      update_ops = ops.get_collection_ref(ops.GraphKeys.UPDATE_OPS)
      update_ops.pop()
      update_ops.pop()
    return out

  x = array_ops.ones((2, 4), dtypes.float32)
  with variable_scope.variable_scope("layer1", use_resource=True):
    y = layer_with_recompute(x)
  loss = math_ops.reduce_sum(y)
  tvars = variables.trainable_variables()
  gradients_impl.gradients(loss, [x] + tvars)

  update_ops = ops.get_collection(ops.GraphKeys.UPDATE_OPS)
  self.assertEqual(2, len(update_ops))
  self.assertEqual([False, True], kwarg_values)
Author: clsung, Project: tensorflow, Lines: 27, Source: rev_block_lib_test.py
Example 7: __getitem__
def __getitem__(self, spec):
  slice_var = self.var[spec]
  slice_val = self.val[spec]

  # compute analytic 2nd derivative
  analytic_grad2 = 2 * slice_val

  dy = variables.Variable(
      array_ops.ones(
          shape=slice_var.get_shape(), dtype=dtypes.int32))
  assign = dy.assign(slice_var)
  slice_val_grad, = gradients_impl.gradients(slice_val, self.var, grad_ys=dy)
  slice_val_grad2, = gradients_impl.gradients(
      slice_val_grad, dy, grad_ys=self.var)
  self.sess.run(assign)
  slice_val_grad_evaled, slice_val_grad2_evaled = (
      self.sess.run([slice_val_grad, slice_val_grad2]))
  analytic_grad2_evaled = analytic_grad2.eval()
  self.test.assertAllEqual(slice_val_grad2_evaled, analytic_grad2_evaled)

  # compute analytic gradient for slice
  np_val_grad = (2 * self.varnp * self.varnp)
  np_sliceval_grad = np.zeros(self.var.get_shape())
  np_sliceval_grad[spec] = np_val_grad[spec]

  # verify gradient
  self.test.assertAllEqual(slice_val_grad_evaled, np_sliceval_grad)
Author: AlbertXiebnu, Project: tensorflow, Lines: 26, Source: array_ops_test.py
Example 8: _create_multi_lstm_cell_ops
def _create_multi_lstm_cell_ops(batch_size, num_units, input_depth,
                                num_layers, max_time, compiled):
  with variable_scope.variable_scope(
      "root",
      initializer=init_ops.random_uniform_initializer(-0.1, 0.1, seed=2)):
    inputs = variable_scope.get_variable(
        "inputs", initializer=random_ops.random_uniform(
            (max_time, batch_size, input_depth), seed=1))
    maybe_xla = lambda c: rnn_cell.CompiledWrapper(c) if compiled else c
    cell = core_rnn_cell_impl.MultiRNNCell(
        [maybe_xla(core_rnn_cell_impl.LSTMCell(num_units))
         for _ in range(num_layers)])
    initial_state = cell.zero_state(
        batch_size=batch_size, dtype=dtypes.float32)
    outputs, final_state = rnn.dynamic_rnn(
        cell=cell, inputs=inputs, initial_state=initial_state,
        time_major=True)
    flat_final_state = nest.flatten(final_state)
    trainable_variables = variables.trainable_variables()
    outputs_grad = gradients_impl.gradients(
        [outputs],
        trainable_variables + [inputs] + nest.flatten(initial_state))
    final_state_grad = gradients_impl.gradients(
        flat_final_state,
        trainable_variables + [inputs] + nest.flatten(initial_state))

    return {"outputs": outputs,
            "final_state": flat_final_state,
            "outputs_grad": outputs_grad,
            "final_state_grad": final_state_grad}
Author: Jackhuang945, Project: tensorflow, Lines: 30, Source: rnn_cell_test.py
Example 9: fwd_gradients
def fwd_gradients(ys, xs, grad_xs=None, stop_gradients=None):
  """Compute forward-mode gradients."""
  # See b/37888268.
  # This version of forward-mode autodiff is based on code by Tim Cooijmans
  # and handles list arguments and certain special cases such as when the
  # ys doesn't depend on one or more of the xs, and when ops.IndexedSlices are
  # generated by the first gradients_impl.gradients call.

  us = [array_ops.zeros_like(y) + float("nan") for y in ys]
  dydxs = gradients_impl.gradients(
      ys, xs, grad_ys=us, stop_gradients=stop_gradients)

  # Deal with strange types that gradients_impl.gradients returns but can't
  # deal with.
  dydxs = [
      ops.convert_to_tensor(dydx)
      if isinstance(dydx, ops.IndexedSlices) else dydx for dydx in dydxs
  ]
  dydxs = [
      array_ops.zeros_like(x) if dydx is None else dydx
      for x, dydx in zip(xs, dydxs)
  ]

  dysdx = gradients_impl.gradients(dydxs, us, grad_ys=grad_xs)
  return dysdx
Author: abidrahmank, Project: tensorflow, Lines: 27, Source: utils.py
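The trick in Example 9 deserves a note: reverse mode computes vector-Jacobian products u^T J, and because u^T J is linear in u, differentiating it again with respect to u yields the Jacobian-vector product J v, i.e. a forward-mode derivative built from two reverse-mode calls. A minimal standalone sketch of that double-reverse trick (a sketch assuming TF 1.x; the variable names are illustrative, not from the library):

import tensorflow as tf

x = tf.constant([1., 2., 3.])
y = x * x                      # elementwise square; Jacobian is diag(2x)
v = tf.constant([1., 0., 0.])  # tangent vector for the JVP

u = tf.zeros_like(y)                    # dummy cotangent; its value never matters
vjp, = tf.gradients(y, x, grad_ys=u)    # u^T J, linear in u
jvp, = tf.gradients(vjp, u, grad_ys=v)  # d(u^T J)/du contracted with v -> J v

with tf.Session() as sess:
  print(sess.run(jvp))  # -> [2. 0. 0.]

The utility above fills the dummy cotangent with NaNs rather than zeros so that any accidental dependence on it surfaces immediately in the result.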
Example 10: doTestIndexedSlicesGradientInCondInWhileLoop
def doTestIndexedSlicesGradientInCondInWhileLoop(self, use_resource=False):
  with ops.Graph().as_default():
    embedding_matrix = variable_scope.get_variable(
        "embedding_matrix", [5, 5],
        initializer=init_ops.random_normal_initializer(),
        use_resource=use_resource)

    def Cond(it, _):
      return it < 5

    def Body(it, cost):
      embedding = embedding_ops.embedding_lookup(embedding_matrix, [0])
      cost = control_flow_ops.cond(
          math_ops.equal(it, 3), lambda: math_ops.square(cost),
          lambda: cost + math_ops.reduce_sum(embedding))
      return it + 1, cost

    _, cost = control_flow_ops.while_loop(
        Cond, Body, [constant_op.constant(0), constant_op.constant(0.0)])
    dynamic_grads = gradients_impl.gradients(cost, [embedding_matrix])[0]
    dynamic_grads = math_ops.segment_sum(dynamic_grads.values,
                                         dynamic_grads.indices)

    embedding = embedding_ops.embedding_lookup(embedding_matrix, [0])
    static = math_ops.square(
        math_ops.reduce_sum(embedding) + math_ops.reduce_sum(embedding) +
        math_ops.reduce_sum(embedding)) + math_ops.reduce_sum(embedding)
    static_grads = gradients_impl.gradients(static, [embedding_matrix])[0]
    static_grads = math_ops.segment_sum(static_grads.values,
                                        static_grads.indices)

    with self.test_session() as sess:
      sess.run(variables.global_variables_initializer())
      self.assertAllEqual(*sess.run([static_grads, dynamic_grads]))
Author: AlbertXiebnu, Project: tensorflow, Lines: 35, Source: control_flow_ops_test.py
Example 11: _RunAndVerifyBackprop
def _RunAndVerifyBackprop(self, input_sizes, filter_sizes, output_sizes,
                          strides, dilations, padding, data_format, use_gpu,
                          err, mode):
  total_input_size = 1
  total_filter_size = 1
  for s in input_sizes:
    total_input_size *= s
  for s in filter_sizes:
    total_filter_size *= s
  # Initializes the input tensor with array containing incrementing
  # numbers from 1.
  x1 = [f * 1.0 for f in range(1, total_input_size + 1)]
  x2 = [f * 1.0 for f in range(1, total_filter_size + 1)]
  default_dilations = (
      dilations[0] == 1 and dilations[1] == 1 and dilations[2] == 1)

  # If any dilation rate is larger than 1, only do test on the GPU
  # because we currently do not have a CPU implementation for arbitrary
  # dilation rates.
  if default_dilations or use_gpu:
    with self.cached_session(use_gpu=use_gpu) as sess:
      if data_format == "NCDHW":
        input_sizes = test_util.NHWCToNCHW(input_sizes)
      t1 = constant_op.constant(x1, shape=input_sizes)
      t2 = constant_op.constant(x2, shape=filter_sizes)
      full_strides = [1] + strides + [1]
      full_dilations = [1] + dilations + [1]
      if data_format == "NCDHW":
        full_strides = test_util.NHWCToNCHW(full_strides)
        full_dilations = test_util.NHWCToNCHW(full_dilations)
      actual = nn_ops.conv3d(
          t1,
          t2,
          strides=full_strides,
          dilations=full_dilations,
          padding=padding,
          data_format=data_format)
      expected = nn_ops.convolution(
          t1,
          t2,
          padding=padding,
          strides=strides,
          dilation_rate=dilations,
          data_format=data_format)
      if data_format == "NCDHW":
        actual = test_util.NCHWToNHWC(actual)
        expected = test_util.NCHWToNHWC(expected)
      actual_grad = gradients_impl.gradients(actual, t1
                                             if mode == "input" else t2)[0]
      expected_grad = gradients_impl.gradients(expected, t1
                                               if mode == "input" else t2)[0]
      # "values" consists of two tensors for two backprops
      actual_value = self.evaluate(actual_grad)
      expected_value = self.evaluate(expected_grad)
      self.assertShapeEqual(actual_value, actual_grad)
      self.assertShapeEqual(expected_value, expected_grad)
    print("expected = ", expected_value)
    print("actual = ", actual_value)
    self.assertArrayNear(expected_value.flatten(), actual_value.flatten(),
                         err)
Author: adit-chandra, Project: tensorflow, Lines: 60, Source: conv_ops_3d_test.py
Example 12: testGradientFloat16
def testGradientFloat16(self):
  with self.test_session(use_gpu=True) as sess:
    # Randomly construct a 1D shape from [1, 40)
    shape = random_ops.random_uniform(
        [1], minval=1, maxval=40, dtype=dtypes.int32)

    # Construct the fp32 graph and its gradient.
    x = random_ops.random_uniform(shape, minval=-1, maxval=1, name="x")
    y1 = nn_ops.relu(x, name="relu_fp32")
    l1 = nn_ops.l2_loss(y1)
    dx_f32 = gradients_impl.gradients(l1, x)

    # Construct the fp16 graph and its gradient.
    # It starts with the same x, in fp32. But before it reaches Relu, it is
    # cast into fp16. So during backprop, the gradient computation is in fp16.
    x2 = math_ops.cast(x, dtype=dtypes.float16, name="cast")
    y2 = nn_ops.relu(x2, name="relu_fp16")
    l2 = nn_ops.l2_loss(y2)
    dx_f16 = gradients_impl.gradients(l2, x)

    # Repeat the experiment for 100 times. All tensor shapes and its tensor
    # values are randomly generated for each run.
    for _ in xrange(100):
      dx_f32_v, dx_f16_v = sess.run([dx_f32, dx_f16])
      self.assertAllClose(dx_f32_v, dx_f16_v, atol=3e-4)
Author: HughKu, Project: tensorflow, Lines: 25, Source: relu_op_test.py
Example 13: testSumOfTwoReadVariablesWithoutRepeatGrad
def testSumOfTwoReadVariablesWithoutRepeatGrad(self):
  with self.test_session(use_gpu=True) as session:
    a = array_ops.identity(
        np.arange(
            3 * 5, dtype=np.float32).reshape(3, 5) + 1)
    b = array_ops.identity(
        np.arange(
            3 * 5, dtype=np.float32).reshape(3, 5) + 1 + 3 * 5)
    ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)
    ta = ta.write(0, a, name="write_a")
    ta = ta.write(1, b, name="write_b")
    c = (
        ta.read(
            0, name="read_a_0") +  # a + b
        ta.read(
            1, name="read_b_0"))
    g0 = -(np.arange(3 * 5, dtype=np.float32).reshape(3, 5) + 1)
    grad_a = gradients_impl.gradients([c], [a], [g0])[0]  # d(a+b)/da = 1
    grad_b = gradients_impl.gradients([c], [b], [g0])[0]  # d(a+b)/db = 1

    # Test gradients calculated individually
    grad_a_t, = session.run([grad_a])
    self.assertAllEqual(grad_a_t, g0)

    grad_b_t, = session.run([grad_b])
    self.assertAllEqual(grad_b_t, g0)

    # Test gradients calculated jointly
    joint_grad_a_t, joint_grad_b_t = session.run([grad_a, grad_b])
    self.assertAllEqual(joint_grad_a_t, g0)
    self.assertAllEqual(joint_grad_b_t, g0)
Author: jzuern, Project: tensorflow, Lines: 31, Source: tensor_array_ops_test.py
Example 14: testEntropyGradient
def testEntropyGradient(self):
  with self.cached_session() as sess:
    logits = constant_op.constant([[1., 2., 3.], [2., 5., 1.]])

    probabilities = nn_ops.softmax(logits)
    log_probabilities = nn_ops.log_softmax(logits)
    true_entropy = - math_ops.reduce_sum(
        probabilities * log_probabilities, axis=-1)

    categorical_distribution = categorical.Categorical(probs=probabilities)
    categorical_entropy = categorical_distribution.entropy()

    # works
    true_entropy_g = gradients_impl.gradients(true_entropy, [logits])
    categorical_entropy_g = gradients_impl.gradients(
        categorical_entropy, [logits])

    res = sess.run({"true_entropy": true_entropy,
                    "categorical_entropy": categorical_entropy,
                    "true_entropy_g": true_entropy_g,
                    "categorical_entropy_g": categorical_entropy_g})
    self.assertAllClose(res["true_entropy"],
                        res["categorical_entropy"])
    self.assertAllClose(res["true_entropy_g"],
                        res["categorical_entropy_g"])
Author: AnishShah, Project: tensorflow, Lines: 25, Source: categorical_test.py
Example 15: fn
def fn():
  ta = tensor_array_ops.TensorArray(
      dtype=dtypes.as_dtype(dtype),
      tensor_array_name="foo",
      size=3,
      infer_shape=False)
  value_0 = constant_op.constant(c([[4.0, 5.0]]))
  value_1 = constant_op.constant(c([[3.0, 3.5]]))
  w0 = ta.write(0, value_0)
  w1 = w0.write(1, value_1)
  r0 = w1.read(0)
  r1 = w1.read(1)
  r0_2 = w1.read(0)

  # Test individual components' gradients
  grad_just_r0 = gradients_impl.gradients(
      ys=[r0], xs=[value_0], grad_ys=[c([[2.0, 3.0]])])
  grad_r0_r0_2 = gradients_impl.gradients(
      ys=[r0, r0_2],
      xs=[value_0],
      grad_ys=[c([[2.0, 3.0]]), c([[1.0, -1.0]])])
  grad_just_r1 = gradients_impl.gradients(
      ys=[r1], xs=[value_1], grad_ys=[c([[-2.0, -4.0]])])
  # Test combined gradients
  grad = gradients_impl.gradients(
      ys=[r0, r0_2, r1],
      xs=[value_0, value_1],
      grad_ys=[c([[2.0, 3.0]]),
               c([[1.0, -1.0]]),
               c([[-2.0, -10.0]])])
  return [grad_just_r0, grad_r0_r0_2, grad_just_r1, grad]
Author: Albert-Z-Guo, Project: tensorflow, Lines: 34, Source: tensor_array_ops_test.py
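Example 15 (like several examples above) leans on the grad_ys argument: it replaces the default all-ones upstream gradient for each y, so gradients(ys, xs, grad_ys=gs) returns the vector-Jacobian product of gs with dys/dxs rather than the plain gradient of sum(ys). A minimal sketch (assuming TF 1.x; the names are illustrative):

import tensorflow as tf

x = tf.constant([1., 2.])
y = 3. * x                   # Jacobian is diag([3., 3.])
g = tf.constant([10., -1.])  # custom upstream gradient
vjp, = tf.gradients([y], [x], grad_ys=[g])

with tf.Session() as sess:
  print(sess.run(vjp))  # -> [30. -3.]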
Example 16: testSecondDerivative
def testSecondDerivative(self):
  with self.test_session() as sess:
    pred = array_ops.placeholder(dtypes.bool, name="pred")
    x = constant_op.constant(3.0, name="x")

    def true_fn():
      return math_ops.pow(x, 3)

    def false_fn():
      return x

    cond = cond_v2.cond_v2(pred, true_fn, false_fn, name="cond")
    cond_grad = gradients_impl.gradients(cond, [x])
    cond_grad_grad = gradients_impl.gradients(cond_grad, [x])

    # d[x^3]/dx = 3x^2
    true_val = sess.run(cond_grad, {pred: True})
    self.assertEqual(true_val, [27.0])
    # d[x]/dx = 1
    false_val = sess.run(cond_grad, {pred: False})
    self.assertEqual(false_val, [1.0])

    true_val = sess.run(cond_grad_grad, {pred: True})
    # d2[x^3]/dx2 = 6x
    self.assertEqual(true_val, [18.0])
    false_val = sess.run(cond_grad_grad, {pred: False})
    # d2[x]/dx2 = 0
    self.assertEqual(false_val, [0.0])
Author: clsung, Project: tensorflow, Lines: 28, Source: cond_v2_test.py
Example 17: _testCond
def _testCond(self, true_fn, false_fn, train_vals, feed_dict=None):
  if not feed_dict:
    feed_dict = {}
  with self.test_session(graph=ops.get_default_graph()) as sess:
    pred = array_ops.placeholder(dtypes.bool, name="pred")

    expected = control_flow_ops.cond(pred, true_fn, false_fn, name="expected")
    actual = cond_v2.cond_v2(pred, true_fn, false_fn, name="actual")

    expected_grad = gradients_impl.gradients(expected, train_vals)
    actual_grad = gradients_impl.gradients(actual, train_vals)

    sess_run_args = {pred: True}
    sess_run_args.update(feed_dict)
    expected_val, actual_val, expected_grad_val, actual_grad_val = sess.run(
        (expected, actual, expected_grad, actual_grad), sess_run_args)
    self.assertEqual(expected_val, actual_val)
    self.assertEqual(expected_grad_val, actual_grad_val)

    sess_run_args = {pred: False}
    sess_run_args.update(feed_dict)
    expected_val, actual_val, expected_grad_val, actual_grad_val = sess.run(
        (expected, actual, expected_grad, actual_grad), sess_run_args)
    self.assertEqual(expected_val, actual_val)
    self.assertEqual(expected_grad_val, actual_grad_val)
Author: clsung, Project: tensorflow, Lines: 25, Source: cond_v2_test.py
Example 18: testGradientThroughSingleBranchOutsideOfContext
def testGradientThroughSingleBranchOutsideOfContext(self):
  x = constant_op.constant(2.)
  s = constant_op.constant(True)
  x_false, x_true = control_flow_ops.switch(x, s)
  grad_x_true = gradients_impl.gradients(x_true, x)[0]
  grad_x_false = gradients_impl.gradients(x_false, x)[0]
  self.assertEquals(self.evaluate(grad_x_true), 1.)
  self.assertEquals(self.evaluate(grad_x_false), 0.)
Author: Wajih-O, Project: tensorflow, Lines: 8, Source: control_flow_ops_test.py
Example 19: grad_fn
def grad_fn(inputs, trainable_variables, outputs, grad_outputs):
  outputs = outputs[0]
  grad_outputs = grad_outputs[0]
  grad_inputs = gradients_impl.gradients(
      outputs, inputs, grad_ys=grad_outputs)
  grad_vars = gradients_impl.gradients(
      outputs, trainable_variables, grad_ys=grad_outputs)
  return grad_inputs, grad_vars
Author: bikong2, Project: tensorflow, Lines: 8, Source: rev_block_lib_test.py
Example 20: testNoGradients
def testNoGradients(self):
  component = constant_op.constant([1.])
  side = constant_op.constant(0.)
  add = lambda x: x + side
  dataset = dataset_ops.Dataset.from_tensor_slices(component).map(add)
  value = dataset_ops.make_one_shot_iterator(dataset).get_next()
  self.assertIsNone(gradients_impl.gradients(value, component)[0])
  self.assertIsNone(gradients_impl.gradients(value, side)[0])
  self.assertIsNone(gradients_impl.gradients(value, [component, side])[0])
Author: kylin9872, Project: tensorflow, Lines: 9, Source: iterator_test.py
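As Example 20 demonstrates, gradients returns None for any x that the ys do not reach through differentiable ops, which is how tests assert that an op (here, a Dataset iterator) blocks gradient flow. A tiny sketch of that convention (assuming TF 1.x):

import tensorflow as tf

a = tf.constant(1.)
b = tf.constant(2.)
c = 2. * a                     # c does not depend on b
print(tf.gradients([c], [b]))  # -> [None]; no path from c back to b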
Note: the tensorflow.python.ops.gradients_impl.gradients examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various authors; copyright remains with the original authors, and distribution or use should follow the corresponding project's License. Please do not republish without permission.