This article collects typical usage examples of the Python function tensorflow.python.eager.tape.watch. If you have been wondering how the watch function is used, how to call it, or where to find examples of it, the curated code samples below may help.
The following presents 12 code examples of the watch function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps our system recommend better Python code examples.
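For orientation before the internal examples, here is a minimal sketch of how the same tracing is exposed through the public tf.GradientTape API (assuming TensorFlow 2.x; this is illustrative usage, not taken from the examples below):

import tensorflow as tf

x = tf.constant(3.0)
with tf.GradientTape() as g:
  g.watch(x)               # constant tensors are not watched automatically
  y = x * x
print(g.gradient(y, x))    # tf.Tensor(6.0, shape=(), dtype=float32)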
Example 1: decorated
def decorated(*args, **kwds):
  """Computes the value and gradient of the decorated function."""
  parameter_positions = _get_arg_spec(f, params, args)
  assert not kwds, "The gradient function can't take keyword arguments."
  this_tape = tape.push_new_tape(persistent=persistent)
  try:
    sources = []
    args = [
        ops.convert_to_tensor(args[i])
        if i in parameter_positions else args[i]
        for i in range(len(args))
    ]
    args = _ensure_unique_tensor_objects(parameter_positions, args)
    for i in parameter_positions:
      sources.append(args[i])
      tape.watch(this_tape, args[i])
    result = f(*args)
    if result is None:
      raise ValueError("Cannot differentiate a function that returns None; "
                       "did you forget to return a value from {}?".format(
                           f.__name__))
    flat_result = nest.flatten(result)
    flat_result = [gen_array_ops.identity(x) for x in flat_result]
    result = nest.pack_sequence_as(result, flat_result)
  finally:
    tape.pop_tape(this_tape)

  def vjp(dy=None):
    if dy is not None:
      dy = [ops.convert_to_tensor(x) for x in nest.flatten(dy)]
    return imperative_grad.imperative_grad(
        this_tape, nest.flatten(result), sources, output_gradients=dy)

  return result, vjp
Author: adit-chandra, Project: tensorflow, Lines: 33, Source: backprop.py
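The vjp closure above accepts an optional dy (output gradient). At the public API level, the analogous knob is the output_gradients argument of tf.GradientTape.gradient; a minimal sketch, assuming TensorFlow 2.x:

import tensorflow as tf

x = tf.constant([1.0, 2.0])
with tf.GradientTape() as g:
  g.watch(x)
  y = x * x
# output_gradients plays the role of the `dy` argument of the vjp closure above.
print(g.gradient(y, x, output_gradients=tf.constant([1.0, 0.5])))  # [2.0, 2.0]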
Example 2: watch
def watch(self, tensor):
  """Ensures that `tensor` is being traced by this tape.

  Args:
    tensor: a Tensor or list of Tensors.
  """
  for t in nest.flatten(tensor):
    tape.watch(_handle_or_self(t))
Author: Eagle732, Project: tensorflow, Lines: 8, Source: backprop.py
Example 3: watch
def watch(self, tensor):
  """Ensures that `tensor` is being traced by this tape.

  Args:
    tensor: a Tensor or Variable, or a list of Tensors or Variables.
  """
  for t in nest.flatten(tensor):
    if isinstance(t, resource_variable_ops.ResourceVariable):
      t = t.handle
    tape.watch(t)
Author: andrewharp, Project: tensorflow, Lines: 10, Source: backprop.py
Example 4: watch
def watch(self, tensor):
  """Ensures that `tensor` is being traced by this tape.

  Args:
    tensor: a Tensor or list of Tensors.
  """
  for t in nest.flatten(tensor):
    if hasattr(t, "handle"):
      # There are many variable-like objects, all of them currently have
      # `handle` attribute that points to a tensor. If this changes, internals
      # of watch_variable need to change as well.
      tape.watch_variable(self._tape, t)
    else:
      tape.watch(self._tape, t)
Author: gunan, Project: tensorflow, Lines: 14, Source: backprop.py
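The handle-vs-tensor dispatch above is invisible to users of the public API: tf.GradientTape.watch accepts both plain tensors and variable-like objects. A minimal sketch, assuming a recent TensorFlow 2.x (watch_accessed_variables is used here only to make the explicit watch calls meaningful):

import tensorflow as tf

v = tf.Variable(2.0)
x = tf.constant(3.0)
with tf.GradientTape(watch_accessed_variables=False) as g:
  g.watch(v)   # variable-like object: traced via its resource handle internally
  g.watch(x)   # plain tensor: traced directly
  y = v * x
print(g.gradient(y, [v, x]))  # [3.0, 2.0]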
Example 5: decorated
def decorated(*args, **kwds):
  """Computes the value and gradient of the decorated function."""
  dy = kwds.pop("dy", None)
  if dy is not None:
    dy = ops.convert_to_tensor(dy)
  assert not kwds, "The gradient function can't take keyword arguments."
  tape.push_new_tape()
  sources = []
  args = [ops.convert_to_tensor(x) for x in args]
  for i in parameter_positions:
    sources.append(args[i])
    tape.watch(args[i])
  result = f(*args)
  return result, imperative_grad(
      result,
      sources,
      output_gradients=dy)
Author: 1000sprites, Project: tensorflow, Lines: 17, Source: backprop.py
Example 6: watch
def watch(self, tensor):
  """Ensures that `tensor` is being traced by this tape.

  Args:
    tensor: a Tensor or list of Tensors.
  """
  for t in nest.flatten(tensor):
    if not t.dtype.is_floating:
      logging.log_first_n(
          logging.WARN, "The dtype of the watched tensor must be "
          "floating (e.g. tf.float32), got %r", 5, t.dtype)
    if hasattr(t, "handle"):
      # There are many variable-like objects, all of them currently have
      # `handle` attribute that points to a tensor. If this changes, internals
      # of watch_variable need to change as well.
      tape.watch_variable(self._tape, t)
    else:
      tape.watch(self._tape, t)
Author: adit-chandra, Project: tensorflow, Lines: 18, Source: backprop.py
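The dtype check above is the source of the familiar "dtype of the watched tensor must be floating" warning. A small sketch of what typically triggers it, assuming TensorFlow 2.x (the exact warning text and the None result depend on the TF version):

import tensorflow as tf

n = tf.constant(3)            # int32: watching it logs the warning shown above
with tf.GradientTape() as g:
  g.watch(n)
  m = n * n
print(g.gradient(m, n))       # typically None: non-floating dtypes are not differentiable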
Example 7: decorated
def decorated(*args, **kwds):
  """Computes the value and gradient of the decorated function."""
  dy = kwds.pop("dy", None)
  if dy is not None:
    dy = ops.convert_to_tensor(dy)
  assert not kwds, "The gradient function can't take keyword arguments."
  tape.push_new_tape()
  sources = []
  args = [
      ops.convert_to_tensor(args[i]) if i in parameter_positions else args[i]
      for i in range(len(args))
  ]
  args = _ensure_unique_tensor_objects(parameter_positions, args)
  for i in parameter_positions:
    sources.append(args[i])
    tape.watch(args[i])
  result = f(*args)
  return result, imperative_grad.imperative_grad(
      _default_vspace, nest.flatten(result), sources,
      output_gradients=nest.flatten(dy) if dy is not None else None)
Author: rajeev921, Project: tensorflow, Lines: 20, Source: backprop.py
Example 8: g
def g(x):
  tape.watch(three)
  return f(x)
Author: chdinh, Project: tensorflow, Lines: 3, Source: function_test.py
Example 9: f
def f():
  x = constant_op.constant(1.0)
  tape.watch(x)
  x = gradient_is_constant(x)
  x = gradient_is_constant(x)
  x = gradient_is_constant(x)
Author: DjangoPeng, Project: tensorflow, Lines: 6, Source: tape_test.py
Example 10: fn
def fn():
  tape.watch(x.handle)
  b = tensor.Tensor(2.0)
  c = math_ops.add(x.value(), b)
  return math_ops.add(c, tensor.Tensor(3.0))
Author: chdinh, Project: tensorflow, Lines: 5, Source: backprop_test.py
Example 11: f
def f():
  with context.device('gpu:0'):
    tape.watch(v.handle)
    return v.read_value()
Author: chdinh, Project: tensorflow, Lines: 4, Source: backprop_test.py
Example 12: _read_variable_op
def _read_variable_op(self):
  if context.in_eager_mode() and self._trainable:
    tape.watch(self._handle)
  return read_variable_op(self._handle, dtype=self._dtype)
Author: keveman, Project: tensorflow, Lines: 4, Source: resource_variable_ops.py
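Example 12 is why trainable variables usually do not need an explicit watch call: reading a trainable resource variable in eager mode registers it on the active tape. A minimal public-API sketch of the same behavior, assuming TensorFlow 2.x:

import tensorflow as tf

v = tf.Variable(5.0)           # trainable by default
with tf.GradientTape() as g:
  y = v * v                    # reading v watches it automatically; no g.watch(v) needed
print(g.gradient(y, v))        # tf.Tensor(10.0, shape=(), dtype=float32)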
Note: The tensorflow.python.eager.tape.watch function examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub/MSDocs. The code snippets were selected from open-source projects contributed by various programmers; copyright of the source code belongs to the original authors. Please follow each project's license when redistributing or using the code, and do not reproduce this article without permission.