This article collects typical usage examples of the Python function tensorflow.python.eager.imperative_grad.imperative_grad. If you are trying to work out what imperative_grad does, how to call it, or what real-world uses of it look like, the curated code examples below should help.
Twelve code examples of the imperative_grad function are shown below, sorted by popularity by default. Upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
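imperative_grad is an internal helper that GradientTape.gradient and the functional gradient wrappers in backprop.py delegate to; as the examples below show, its signature has changed across TensorFlow versions (earlier releases pass a vspace argument, later ones do not). As a frame of reference, here is a minimal sketch of the public tf.GradientTape API that ultimately calls into it, assuming a TensorFlow build with eager execution enabled:

import tensorflow as tf

# Public-API entry point whose gradient() method delegates to
# imperative_grad.imperative_grad (see the gradient() examples below).
x = tf.constant(3.0)
with tf.GradientTape() as tape:
  tape.watch(x)              # plain tensors are not watched automatically
  y = x * x
dy_dx = tape.gradient(y, x)  # tf.Tensor(6.0)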
Example 1: grad_fn
def grad_fn(*args):
  """Computes the gradient of the wrapped function."""
  this_tape = tape.push_new_tape()
  try:
    end_node = f(*args)
    if end_node is None:
      raise ValueError("Cannot differentiate a function that returns None; "
                       "did you forget to return a value from {}?".format(
                           f.__name__))
  finally:
    tape.pop_tape(this_tape)
  # Sorting variables by id, which is monotonically increasing in construction
  # order. This ensures unique order across executions.
  variables = list(sorted(this_tape.watched_variables(),
                          key=lambda v: v.handle._id))  # pylint: disable=protected-access
  sources = [x.handle for x in variables]
  if not sources:
    raise ValueError("No trainable variables were accessed while the "
                     "function was being computed.")
  grad = imperative_grad.imperative_grad(_default_vspace,
                                         this_tape,
                                         nest.flatten(end_node),
                                         sources)
  return end_node, list(zip(grad, variables))
Author: andrewharp, Project: tensorflow, Lines of code: 25, Source: backprop.py
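The (gradient, variable) pairs returned by grad_fn above match the grads_and_vars format that TensorFlow optimizers consume. A minimal sketch with the public API (assuming TensorFlow 2.x; the optimizer and learning rate are illustrative):

import tensorflow as tf

v = tf.Variable(1.0)
with tf.GradientTape() as tape:
  loss = (v - 3.0) ** 2
grads = tape.gradient(loss, [v])  # variables are watched automatically
# Pair gradients with their variables, as grad_fn does with zip(grad, variables).
tf.keras.optimizers.SGD(learning_rate=0.1).apply_gradients(zip(grads, [v]))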
Example 2: gradient
def gradient(self, target, sources):
  """Computes the gradient using information traced by the tape.

  Args:
    target: the tensor to be differentiated.
    sources: a list of Tensors or Variables, the target will be
      differentiated with respect to the sources.

  Returns:
    a list of Tensors (or IndexedSlices, or None), one for each element in
    `sources`.

  Raises:
    RuntimeError: if called inside the context of the tape, or if called more
      than once.
  """
  if self._tape is None:
    raise RuntimeError("GradientTape.gradient can only be called once "
                       "on non-persistent tapes, and "
                       "only when the context manager has exited.")
  sources = [x.handle if isinstance(x, resource_variable_ops.ResourceVariable)
             else x
             for x in sources]
  grad = imperative_grad.imperative_grad(
      _default_vspace, self._tape, [target], sources)
  if not self._persistent:
    self._tape = None
  return grad
Author: andrewharp, Project: tensorflow, Lines of code: 28, Source: backprop.py
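gradient() above unwraps ResourceVariable sources to their handles before calling imperative_grad, which is why the public API lets you differentiate with respect to variables and plain tensors alike. A minimal sketch (assuming eager execution; the values are illustrative):

import tensorflow as tf

v = tf.Variable(2.0)
x = tf.constant(4.0)
with tf.GradientTape() as tape:
  tape.watch(x)                    # tensors must be watched explicitly
  y = v * x                        # variables are watched automatically
dv, dx = tape.gradient(y, [v, x])  # dv == 4.0, dx == 2.0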
Example 3: grad_fn
def grad_fn(*args):
  """Computes the gradient of the wrapped function."""
  tape.push_new_tape()
  end_node = f(*args)
  variables = tape.top_tape_watched_variables()
  sources = [x.handle for x in variables]
  grad = imperative_grad.imperative_grad(_default_vspace,
                                         nest.flatten(end_node),
                                         sources)
  return end_node, list(zip(grad, variables))
Author: rajeev921, Project: tensorflow, Lines of code: 10, Source: backprop.py
Example 4: gradient
def gradient(self, target, sources, output_gradients=None):
  """Computes the gradient using operations recorded in context of this tape.

  Args:
    target: Tensor (or list of tensors) to be differentiated.
    sources: a list or nested structure of Tensors or Variables. `target`
      will be differentiated against elements in `sources`.
    output_gradients: a list of gradients, one for each element of
      target. Defaults to None.

  Returns:
    a list or nested structure of Tensors (or IndexedSlices, or None),
    one for each element in `sources`. Returned structure is the same as
    the structure of `sources`.

  Raises:
    RuntimeError: if called inside the context of the tape, or if called more
      than once on a non-persistent tape.
  """
  if self._tape is None:
    raise RuntimeError("GradientTape.gradient can only be called once on "
                       "non-persistent tapes.")
  if self._recording:
    if not self._persistent:
      self._pop_tape()
    else:
      logging.log_first_n(logging.WARN,
                          "Calling GradientTape.gradient on a persistent "
                          "tape inside its context is significantly less "
                          "efficient than calling it outside the context (it "
                          "causes the gradient ops to be recorded on the "
                          "tape, leading to increased CPU and memory usage). "
                          "Only call GradientTape.gradient inside the "
                          "context if you actually want to trace the "
                          "gradient in order to compute higher order "
                          "derivatives.", 1)
  flat_sources = nest.flatten(sources)
  flat_sources = [_handle_or_self(x) for x in flat_sources]
  if output_gradients is not None:
    output_gradients = [None if x is None else ops.convert_to_tensor(x)
                        for x in nest.flatten(output_gradients)]
  flat_grad = imperative_grad.imperative_grad(
      self._tape,
      nest.flatten(target),
      flat_sources,
      output_gradients=output_gradients)
  if not self._persistent:
    self._tape = None
  grad = nest.pack_sequence_as(sources, flat_grad)
  return grad
Author: gunan, Project: tensorflow, Lines of code: 55, Source: backprop.py
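The output_gradients argument handled above seeds the backward pass, turning gradient() into a vector-Jacobian product. A minimal sketch of the public-API behaviour (the seed values are illustrative):

import tensorflow as tf

x = tf.constant([1.0, 2.0])
with tf.GradientTape() as tape:
  tape.watch(x)
  y = x * x                        # elementwise, so dy/dx = [2.0, 4.0]
# Seed the backward pass with dL/dy = [1.0, 0.5]:
grad = tape.gradient(y, x, output_gradients=tf.constant([1.0, 0.5]))
# grad == [2.0 * 1.0, 4.0 * 0.5] == [2.0, 2.0]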
Example 5: grad_fn
def grad_fn(*args):
  """Computes the gradient of the wrapped function."""
  tape.push_new_tape()
  end_node = f(*args)
  variables = tape.top_tape_watched_variables()
  sources = [x.handle for x in variables]
  if not sources:
    raise ValueError("no trainable variables were accessed while the "
                     "function was being computed.")
  grad = imperative_grad.imperative_grad(_default_vspace,
                                         tape.pop_tape(),
                                         nest.flatten(end_node),
                                         sources)
  return end_node, list(zip(grad, variables))
Author: DjangoPeng, Project: tensorflow, Lines of code: 15, Source: backprop.py
Example 6: decorated
def decorated(*args, **kwds):
  """Computes the value and gradient of the decorated function."""
  dy = kwds.pop("dy", None)
  if dy is not None:
    dy = ops.convert_to_tensor(dy)
  assert not kwds, "The gradient function can't take keyword arguments."
  tape.push_new_tape()
  sources = []
  args = [
      ops.convert_to_tensor(args[i]) if i in parameter_positions else args[i]
      for i in range(len(args))
  ]
  args = _ensure_unique_tensor_objects(parameter_positions, args)
  for i in parameter_positions:
    sources.append(args[i])
    tape.watch(args[i])
  result = f(*args)
  return result, imperative_grad.imperative_grad(
      _default_vspace, nest.flatten(result), sources,
      output_gradients=nest.flatten(dy) if dy is not None else None)
Author: rajeev921, Project: tensorflow, Lines of code: 20, Source: backprop.py
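decorated() above watches only the tensors at parameter_positions, so the gradient is taken with respect to a chosen subset of arguments. With the public API the same effect comes from explicit watch() calls; a minimal sketch (the function f below is illustrative):

import tensorflow as tf

def f(a, b):
  return a * b

a = tf.constant(2.0)
b = tf.constant(5.0)
with tf.GradientTape() as tape:
  tape.watch(a)                      # differentiate with respect to a only
  result = f(a, b)
grad_a = tape.gradient(result, a)    # 5.0; asking for d/db would return None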
Example 7: grad_fn
def grad_fn(*args, **kwds):
  """Computes the gradient of the wrapped function."""
  this_tape = tape.push_new_tape()
  try:
    end_node = f(*args, **kwds)
    if end_node is None:
      raise ValueError("Cannot differentiate a function that returns None; "
                       "did you forget to return a value from {}?".format(
                           f.__name__))
  finally:
    tape.pop_tape(this_tape)
  # Note: variables are returned in construction order. This ensures unique
  # order across executions.
  variables = this_tape.watched_variables()
  if not variables:
    raise ValueError("No trainable variables were accessed while the "
                     "function was being computed.")
  sources = [v.handle for v in variables]
  grad = imperative_grad.imperative_grad(this_tape, nest.flatten(end_node),
                                         sources)
  return end_node, list(zip(grad, variables))
Author: adit-chandra, Project: tensorflow, Lines of code: 22, Source: backprop.py
Example 8: grad_fn
def grad_fn(*args):
  """Computes the gradient of the wrapped function."""
  tape.push_new_tape()
  try:
    end_node = f(*args)
    if end_node is None:
      raise ValueError("Cannot differentiate a function that returns None; "
                       "did you forget to return a value from {}?".format(
                           f.__name__))
  finally:
    popped_tape = tape.pop_tape()
  variables = popped_tape.watched_variables()
  sources = [x.handle for x in variables]
  if not sources:
    raise ValueError("No trainable variables were accessed while the "
                     "function was being computed.")
  grad = imperative_grad.imperative_grad(_default_vspace,
                                         popped_tape,
                                         nest.flatten(end_node),
                                         sources)
  return end_node, list(zip(grad, variables))
Author: SylChan, Project: tensorflow, Lines of code: 22, Source: backprop.py
Example 9: gradient
def gradient(self, target, sources, output_gradients=None):
  """Computes the gradient using operations recorded in context of this tape.

  Args:
    target: Tensor (or list of tensors) to be differentiated.
    sources: a list or nested structure of Tensors or Variables. `target`
      will be differentiated against elements in `sources`.
    output_gradients: a list of gradients, one for each element of
      target. Defaults to None.

  Returns:
    a list or nested structure of Tensors (or IndexedSlices, or None),
    one for each element in `sources`. Returned structure is the same as
    the structure of `sources`.

  Raises:
    RuntimeError: if called inside the context of the tape, or if called more
      than once on a non-persistent tape.
  """
  if self._tape is None:
    raise RuntimeError("GradientTape.gradient can only be called once "
                       "on non-persistent tapes, and "
                       "only when the context manager has exited.")
  flat_sources = nest.flatten(sources)
  flat_sources = [_handle_or_self(x) for x in flat_sources]
  if output_gradients is not None:
    output_gradients = [None if x is None else ops.convert_to_tensor(x)
                        for x in nest.flatten(output_gradients)]
  flat_grad = imperative_grad.imperative_grad(
      _default_vspace, self._tape, nest.flatten(target), flat_sources,
      output_gradients=output_gradients)
  if not self._persistent:
    self._tape = None
  grad = nest.pack_sequence_as(sources, flat_grad)
  return grad
Author: Jackiefan, Project: tensorflow, Lines of code: 39, Source: backprop.py
Example 10: gradient
def gradient(self,
             target,
             sources,
             output_gradients=None,
             unconnected_gradients=UnconnectedGradients.NONE):
  """Computes the gradient using operations recorded in context of this tape.

  Args:
    target: Tensor (or list of tensors) to be differentiated.
    sources: a list or nested structure of Tensors or Variables. `target`
      will be differentiated against elements in `sources`.
    output_gradients: a list of gradients, one for each element of
      target. Defaults to None.
    unconnected_gradients: a value which can either hold 'none' or 'zero' and
      alters the value which will be returned if the target and sources are
      unconnected. The possible values and effects are detailed in
      'UnconnectedGradients' and it defaults to 'none'.

  Returns:
    a list or nested structure of Tensors (or IndexedSlices, or None),
    one for each element in `sources`. Returned structure is the same as
    the structure of `sources`.

  Raises:
    RuntimeError: if called inside the context of the tape, or if called more
      than once on a non-persistent tape.
    ValueError: if the target is a variable or if unconnected gradients is
      called with an unknown value.
  """
  if self._tape is None:
    raise RuntimeError("GradientTape.gradient can only be called once on "
                       "non-persistent tapes.")
  if self._recording:
    if not self._persistent:
      self._pop_tape()
    else:
      logging.log_first_n(
          logging.WARN, "Calling GradientTape.gradient on a persistent "
          "tape inside its context is significantly less "
          "efficient than calling it outside the context (it "
          "causes the gradient ops to be recorded on the "
          "tape, leading to increased CPU and memory usage). "
          "Only call GradientTape.gradient inside the "
          "context if you actually want to trace the "
          "gradient in order to compute higher order "
          "derivatives.", 1)
  flat_targets = []
  for t in nest.flatten(target):
    if not t.dtype.is_floating:
      logging.vlog(
          logging.WARN, "The dtype of the target tensor must be "
          "floating (e.g. tf.float32) when calling GradientTape.gradient, "
          "got %r", t.dtype)
    if resource_variable_ops.is_resource_variable(t):
      with self:
        t = ops.convert_to_tensor(t)
    flat_targets.append(t)
  flat_sources = nest.flatten(sources)
  flat_sources_raw = flat_sources
  flat_sources = [_handle_or_self(x) for x in flat_sources]
  for t in flat_sources_raw:
    if not t.dtype.is_floating:
      logging.vlog(
          logging.WARN, "The dtype of the source tensor must be "
          "floating (e.g. tf.float32) when calling GradientTape.gradient, "
          "got %r", t.dtype)
  if output_gradients is not None:
    output_gradients = [None if x is None else ops.convert_to_tensor(x)
                        for x in nest.flatten(output_gradients)]
  flat_grad = imperative_grad.imperative_grad(
      self._tape,
      flat_targets,
      flat_sources,
      output_gradients=output_gradients,
      sources_raw=flat_sources_raw,
      unconnected_gradients=unconnected_gradients)
  if not self._persistent:
    self._tape = None
  grad = nest.pack_sequence_as(sources, flat_grad)
  return grad
Author: adit-chandra, Project: tensorflow, Lines of code: 86, Source: backprop.py
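Example 10 adds the unconnected_gradients argument, which controls what imperative_grad returns for sources that the target does not depend on. A minimal sketch of the public-API behaviour (assuming a TensorFlow version that exposes tf.UnconnectedGradients):

import tensorflow as tf

x = tf.constant(1.0)
z = tf.constant(5.0)
with tf.GradientTape() as tape:
  tape.watch(x)
  tape.watch(z)
  y = x * 2.0                        # z never participates in the computation
grads = tape.gradient(y, [x, z],
                      unconnected_gradients=tf.UnconnectedGradients.ZERO)
# grads == [2.0, 0.0]; with the default NONE the second entry would be None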
Example 11: vjp
def vjp(dy=None):
  if dy is not None:
    dy = [ops.convert_to_tensor(x) for x in nest.flatten(dy)]
  return imperative_grad.imperative_grad(
      this_tape, nest.flatten(result), sources, output_gradients=dy)
Author: adit-chandra, Project: tensorflow, Lines of code: 5, Source: backprop.py
Example 12: vjp
def vjp(dy=None):
  return imperative_grad.imperative_grad(
      _default_vspace, t, nest.flatten(result), sources,
      output_gradients=nest.flatten(dy) if dy is not None else None)
Author: DjangoPeng, Project: tensorflow, Lines of code: 4, Source: backprop.py
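Both vjp closures forward dy to imperative_grad as output_gradients, i.e. they compute a vector-Jacobian product seeded by dy (when dy is None the backward pass is seeded with ones, the usual default). A minimal sketch of the same contract via the public API (values are illustrative):

import tensorflow as tf

x = tf.constant([1.0, 2.0, 3.0])
with tf.GradientTape() as tape:
  tape.watch(x)
  y = tf.reduce_sum(x * x)           # scalar result, dy/dx = [2.0, 4.0, 6.0]
vjp_value = tape.gradient(y, x, output_gradients=tf.constant(2.0))
# Seeding with dy = 2.0 scales the gradient: vjp_value == [4.0, 8.0, 12.0]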
Note: The tensorflow.python.eager.imperative_grad.imperative_grad examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Please consult the corresponding project's License before redistributing or using it, and do not reproduce without permission.