This article collects typical usage examples of the tensorflow.python.framework.ops.tensor_id function in Python. If you have been wondering what exactly tensor_id does, how to call it, or where it shows up in practice, the curated examples below should help.
The following presents 16 code examples of the tensor_id function, ordered by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
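Before diving in, it helps to know what tensor_id itself does: in the TF 1.x-era source tree these snippets come from, ops.tensor_id(t) appears to return a unique integer identifier for a tensor, which makes tensors usable as dictionary keys by object identity rather than by value. A minimal sketch of that recurring pattern (an illustration under that assumption, not code from the examples below):

import tensorflow as tf
from tensorflow.python.framework import ops

x = tf.constant([1.0, 2.0])
y = tf.constant([1.0, 2.0])  # equal values, but a distinct Tensor object

# tensor_id keys dictionaries by tensor identity, not value -- exactly how
# the capture maps and tape dictionaries in the examples below use it.
seen = {ops.tensor_id(x): "x", ops.tensor_id(y): "y"}
assert len(seen) == 2  # ids differ even though the values are equal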
Example 1: capture_value
def capture_value(tensor_map, value, dtype, name):
  """Capture a value from outside the function, to pass in as an extra arg."""
  captured_value = tensor_map.get(ops.tensor_id(value), None)
  if captured_value is None:
    captured_value = graph_placeholder(
        dtype=dtype or value.dtype, shape=value.shape, name=name)
    if captured_value.dtype == dtypes_module.resource:
      handle_data = value._handle_data  # pylint: disable=protected-access
      captured_value._handle_data = handle_data  # pylint: disable=protected-access
      if handle_data is not None and handle_data.is_set:
        # Ensure that shapes and dtypes are propagated.
        shapes, types = zip(*[(pair.shape, pair.dtype)
                              for pair in handle_data.shape_and_type])
        ranks = [len(s.dim) if not s.unknown_rank else -1 for s in shapes]
        shapes = [[d.size for d in s.dim]
                  if not s.unknown_rank else None for s in shapes]
        with errors.raise_exception_on_not_ok_status() as status:
          pywrap_tensorflow.TF_GraphSetOutputHandleShapesAndTypes_wrapper(
              captured_value._op._graph._c_graph,  # pylint: disable=protected-access
              captured_value._as_tf_output(),  # pylint: disable=protected-access
              shapes,
              ranks,
              types,
              status)
    tensor_map[ops.tensor_id(value)] = (value, captured_value)
  else:
    captured_value = captured_value[1]
  tape.record_operation("captured_value", [captured_value], [value],
                        lambda x: [x])
  return captured_value
Developer: AndrewTwinz, Project: tensorflow, Lines: 31, Source: function.py
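The pattern here, one map entry per outside tensor holding the (external value, internal placeholder) pair, is the core of function capturing, and it recurs in Examples 5 and 9 below. A stripped-down illustration of just the bookkeeping (plain Python; the placeholder factory is a hypothetical stand-in):

# Toy model of the capture map: tensor id -> (outside value, inner proxy).
tensor_map = {}

def capture(value, make_placeholder=lambda v: ("placeholder_for", v)):
  key = id(value)                      # stand-in for ops.tensor_id(value)
  if key not in tensor_map:
    tensor_map[key] = (value, make_placeholder(value))
  return tensor_map[key][1]            # always hand back the inner proxy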
Example 2: _convert_to_graph_tensor
def _convert_to_graph_tensor(value, dtype=None, name=None, as_ref=False):
  """Captures a Tensor while building a graph mode function.

  Arguments:
    value: A Tensor object.
    dtype: The datatype of the value produced by the node in the graph.
    name: Name of the node in the graph.
    as_ref: Ignored (required by register_tensor_conversion_function).

  Returns:
    Returns a constant (the current value of the tensor) if capturing
    is not enabled. A placeholder which will have the value of the
    tensor at runtime otherwise.
  """
  if context.in_eager_mode():
    return value
  _ = as_ref
  tensor_map = _scoped_captures.tensors
  if tensor_map is None:
    # Capturing is not enabled.
    return constant_op.constant(value.numpy())
  captured_value = tensor_map.get(ops.tensor_id(value), None)
  if captured_value is None:
    captured_value = graph_placeholder(
        dtype=dtype or value.dtype, shape=value.shape, name=name)
    if captured_value.dtype == dtypes.resource:
      captured_value._handle_data = value._handle_data  # pylint: disable=protected-access
    tensor_map[ops.tensor_id(value)] = (value, captured_value)
  else:
    captured_value = captured_value[1]
  tape.record_operation("captured_value", [captured_value], [value], [],
                        lambda x: x)
  return captured_value
Developer: Mazecreator, Project: tensorflow, Lines: 33, Source: function.py
Example 3: _convert_to_graph_constant
def _convert_to_graph_constant(value, dtype=None, name=None, as_ref=False):
  """Captures a tfe Tensor while building a graph mode function.

  Creates a placeholder to pass the tensor as an argument.

  Arguments:
    value: A tfe.Tensor object
    dtype: The datatype of the value produced by the node in the graph.
    name: Name of the node in the graph.
    as_ref: Ignored (required by register_tensor_conversion_function).

  Returns:
    A placeholder which will, at runtime, have the value of this tensor.

  Raises:
    ValueError: if called outside a defun context.
  """
  if context.in_eager_mode():
    return value
  _ = as_ref
  tensor_map = _scoped_captures.tensors
  if tensor_map is None:
    raise ValueError(
        "Trying to use tfe.Tensor objects in a graph outside graph mode. "
        "To build a graph use tfe.defun or tfe.make_template.")
  captured_value = tensor_map.get(ops.tensor_id(value), None)
  if captured_value is None:
    captured_value = graph_placeholder(
        dtype=dtype or value.dtype, shape=value.shape, name=name)
    if captured_value.dtype == dtypes.resource:
      captured_value._handle_data = value._handle_data  # pylint: disable=protected-access
    tensor_map[ops.tensor_id(value)] = (value, captured_value)
  else:
    captured_value = captured_value[1]
  return captured_value
Developer: chdinh, Project: tensorflow, Lines: 35, Source: function.py
Example 4: _watch_with_tape
def _watch_with_tape(tape, resource_variable):
  """Wraps a watched Tensor and keeps track of it in the implicit tape."""
  tensor = resource_variable.handle
  w = _watch_with_tape_internal(tape, tensor)
  if ag_core.isnode(tape):
    tape.value.variables[ops.tensor_id(tensor)] = resource_variable
    tape.value.tensors[ops.tensor_id(tensor)] = w
Developer: solaris33, Project: tensorflow, Lines: 7, Source: tape.py
Example 5: capture_value
def capture_value(tensor_map, value, dtype, name):
  """Capture a value from outside the function, to pass in as an extra arg."""
  captured_value = tensor_map.get(ops.tensor_id(value), None)
  if captured_value is None:
    captured_value = graph_placeholder(
        dtype=dtype or value.dtype, shape=value.shape, name=name)
    if captured_value.dtype == dtypes.resource:
      captured_value._handle_data = value._handle_data  # pylint: disable=protected-access
    tensor_map[ops.tensor_id(value)] = (value, captured_value)
  else:
    captured_value = captured_value[1]
  tape.record_operation("captured_value", [captured_value], [value],
                        lambda x: [x])
  return captured_value
Developer: SylChan, Project: tensorflow, Lines: 14, Source: function.py
Example 6: _ensure_unique_tensor_objects
def _ensure_unique_tensor_objects(parameter_positions, args):
  """Make each of the parameter_positions in args a unique ops.Tensor object.

  Ensure that each parameter is treated independently.

  For example:

    def f(x, y): return x * y
    g = gradients_function(f)
    one = tf.constant(1.)

    g(one, one) should return [1., 1.]
    (even though the two arguments are the same Tensor object).

  Args:
    parameter_positions: List of indices into args defining the arguments to
      differentiate against.
    args: A list of arguments to the function to be differentiated.

  Returns:
    args, possibly edited in-place.
  """
  s = set()
  for (i, t) in enumerate(args):
    if i in parameter_positions:
      tid = ops.tensor_id(t)
      if tid in s:
        args[i] = gen_array_ops.identity(args[i])
      else:
        s.add(tid)
  return args
Developer: adit-chandra, Project: tensorflow, Lines: 30, Source: backprop.py
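The scenario in the docstring above is easy to reproduce end to end. A hedged sketch using the contrib eager API of the same era (tfe.gradients_function and tfe.enable_eager_execution are assumptions about a TF 1.x install with eager support; the names moved in later releases):

import tensorflow as tf
import tensorflow.contrib.eager as tfe

tfe.enable_eager_execution()  # TF 1.x-era switch; later moved under tf.*

def f(x, y):
  return x * y

g = tfe.gradients_function(f)
one = tf.constant(1.)

# Because _ensure_unique_tensor_objects inserts an identity op for the
# repeated argument, each parameter position is differentiated on its own:
print(g(one, one))  # expect [1.0, 1.0]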
Example 7: execute
def execute(op_name, num_outputs, inputs, attrs=None, name=None):
  """Execute a TensorFlow operation.

  Args:
    op_name: Name of the TensorFlow operation (see REGISTER_OP in C++ code) to
      execute.
    num_outputs: The number of outputs of the operation to fetch.
      (Explicitly provided instead of being inferred for performance
      reasons).
    inputs: A list of inputs to the operation. Each entry should be a Tensor, or
      a value which can be passed to the Tensor constructor to create one.
    attrs: A tuple with alternating string attr names and attr values for this
      operation.
    name: Customized name for the operation.

  Returns:
    None if there are no outputs, a single Tensor object if there is one output
    and a list of Tensor objects if there are multiple outputs.

  Raises:
    An exception on error.
  """
  ctx = context.get_default_context()
  # TODO(apassos) move this to convert_to_tensor
  inputs = [ag_core.getval(x) for x in inputs]
  # pylint: disable=protected-access
  input_handles = [c._handle for c in inputs]
  device_name = ctx.device_name
  try:
    outh = pywrap_tensorflow.TFE_Py_Execute(ctx._handle, device_name,
                                            str(op_name), input_handles, attrs,
                                            num_outputs)
    # pylint: enable=protected-access
  except core._NotOkStatusException as e:  # pylint: disable=protected-access
    if name is not None:
      message = e.message + " name: " + name
    else:
      message = e.message
    raise core._status_to_exception(e.code, message)  # pylint: disable=protected-access
  # pylint: enable=protected-access
  tensors = [tensor._tensor_from_handle(x) for x in outh]  # pylint: disable=protected-access
  # TODO(alive, cais): Use the execution callback mechanism.
  if core.active_trace() is not None:
    trace_name = name if name else op_name
    for t in tensors:
      # pylint: disable=protected-access
      core.active_trace().record_tensor(trace_name,
                                        ops.tensor_id(t),
                                        t._device_name(),
                                        t.shape.num_elements())
      # pylint: enable=protected-access
  # TODO(cais): Optimize this, perhaps by replacing this execute function with
  # a different one when there are execution callback(s).
  for callback in ctx.post_execution_callbacks:
    callback(op_name, name, attrs, inputs, tensors)
  return tensors
Developer: keveman, Project: tensorflow, Lines: 59, Source: execute.py
Example 8: _prepare_backprop
def _prepare_backprop(target, tensor_to_op, op_to_entry, id_sources):
  """Filters the tape to only include relevant entries and counts tensor usages.

  Args:
    target: the target to optimize.
    tensor_to_op: Map from tensor id to key in op_to_entry that produced it.
    op_to_entry: Map from op id to a tape.TapeEntry object
    id_sources: the ids of the sources wrt the gradient is being taken.

  Returns:
    usage counts (how many entries downstream from a tensor use it)
    op_to_entry_map: entry map (a filtered tape, with only the relevant
      entries),
    missing: map from tensor id to how many downstream gradients still need
      to be computed before this tensor's gradient can be computed.
  """
  if isinstance(target, (ops.Tensor)):
    tensor_stack = [ops.tensor_id(target)]
  else:
    tensor_stack = list([ops.tensor_id(x) for x in target])
  tensor_usage_counts = {}
  o_to_e = {}  # Copy of just the bits we need from op_to_entry
  while tensor_stack:
    t = tensor_stack.pop()
    op = tensor_to_op[t]
    # op is None if the tensor is a source (i.e. was watched directly)
    if op is None or op in o_to_e:
      continue
    op_trace = op_to_entry[op]
    o_to_e[op] = op_trace
    for it in op_trace.input_ids:
      if it in tensor_usage_counts:
        tensor_usage_counts[it] += 1
      else:
        tensor_usage_counts[it] = 1
        if it not in id_sources and it in tensor_to_op:
          tensor_stack.append(it)
  op_missing_tensor_counts = collections.defaultdict(int)
  for t in tensor_usage_counts:
    if t in tensor_to_op and tensor_to_op[t] is not None:
      op_missing_tensor_counts[tensor_to_op[t]] += 1
  return tensor_usage_counts, o_to_e, op_missing_tensor_counts
Developer: Crazyonxh, Project: tensorflow, Lines: 42, Source: backprop.py
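The interplay between tensor_to_op, op_to_entry, and the returned usage counts is easiest to see on a toy trace. A minimal illustration with plain dictionaries (the TapeEntry stand-in and all ids below are invented for the sketch, not the real tape export format):

import collections

# Hypothetical trace of  y = (x * x) + x  -- ids are invented:
#   tensors: x=1, x*x=2, y=3     ops: mul=10, add=11
TapeEntry = collections.namedtuple("TapeEntry", ["input_ids", "output_ids"])
tensor_to_op = {1: None, 2: 10, 3: 11}          # None = watched source
op_to_entry = {10: TapeEntry([1, 1], [2]),      # mul consumed x twice
               11: TapeEntry([2, 1], [3])}
id_sources = [1]                                # differentiating wrt x

# Walking back from y (id 3) visits both ops; counting every input use
# yields tensor_usage_counts == {1: 3, 2: 1}: the backprop loop must
# collect three partial gradients for x and one for x*x before it can
# aggregate each tensor's total gradient.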
Example 9: capture_value
def capture_value(tensor_map, value, dtype, name):
  """Capture a value from outside the function, to pass in as an extra arg."""
  captured_value = tensor_map.get(ops.tensor_id(value), None)
  if captured_value is None:
    captured_value = graph_placeholder(
        dtype=dtype or value.dtype, shape=value.shape, name=name)
    if captured_value.dtype == dtypes_module.resource:
      if ops._USE_C_SHAPES:  # pylint: disable=protected-access
        if isinstance(value, ops.EagerTensor):
          handle_data = value._handle_data  # pylint: disable=protected-access
        else:
          handle_data = resource_variable_ops.get_resource_handle_data(value)
      else:
        handle_data = value._handle_data  # pylint: disable=protected-access
      if handle_data is not None and handle_data.is_set:
        # pylint: disable=protected-access
        if ops._USE_C_SHAPES:
          pywrap_tensorflow.SetResourceHandleShapeAndType(
              captured_value.graph._c_graph, captured_value._as_tf_output(),
              handle_data.SerializeToString())
        else:
          captured_value._handle_data = handle_data
        # pylint: enable=protected-access
        # Ensure that shapes and dtypes are propagated.
        shapes, types = zip(*[(pair.shape, pair.dtype)
                              for pair in handle_data.shape_and_type])
        ranks = [len(s.dim) if not s.unknown_rank else -1 for s in shapes]
        shapes = [[d.size for d in s.dim]
                  if not s.unknown_rank else None for s in shapes]
        pywrap_tensorflow.TF_GraphSetOutputHandleShapesAndTypes_wrapper(
            captured_value._op._graph._c_graph,  # pylint: disable=protected-access
            captured_value._as_tf_output(),  # pylint: disable=protected-access
            shapes, ranks, types)
    tensor_map[ops.tensor_id(value)] = (value, captured_value)
  else:
    captured_value = captured_value[1]
  tape.record_operation("captured_value", [captured_value], [value],
                        lambda x: [x])
  return captured_value
Developer: Jackiefan, Project: tensorflow, Lines: 40, Source: function.py
Example 10: _initial_gradients
def _initial_gradients(target, output_gradients, tensor_usage_counts):
  """Computes the initial gradients for each Tensor."""
  # Initialize the backprop stack
  gradients = collections.defaultdict(list)
  if isinstance(target, ops.Tensor):
    if output_gradients is not None:
      output_gradient = output_gradients
    else:
      output_gradient = array_ops.ones_like(target)
    gradients[ops.tensor_id(target)].append(output_gradient)
  else:
    for i, t in enumerate(target):
      if ops.tensor_id(t) in tensor_usage_counts:
        # Can't provide a gradient of something we're trying to differentiate
        assert output_gradients is None or output_gradients[i] is None
      else:
        if output_gradients is None or output_gradients[i] is None:
          out_grad = array_ops.ones_like(t)
        else:
          out_grad = output_gradients[i]
        gradients[ops.tensor_id(t)].append(out_grad)
  return gradients
Developer: Crazyonxh, Project: tensorflow, Lines: 22, Source: backprop.py
Example 11: _backprop_call
def _backprop_call(self, args):
  """Calls the wrapped function and records the result on a tape."""
  all_args = args + self._extra_inputs
  signature = self._forward_fdef.definition.signature
  if context.in_graph_mode():
    g = ops.get_default_graph()
    g._add_function(self._forward_fdef)  # pylint: disable=protected-access
    unwrapped_args = [ag_core.getval(x) for x in all_args]
    op = g.create_op(
        signature.name, [ops.convert_to_tensor(x) for x in unwrapped_args],
        [dtypes.DType(x.type) for x in signature.output_arg],
        op_def=signature,
        name="FunctionCall",
        compute_shapes=False)
    outputs = op.outputs
    outputs = [outputs] if isinstance(
        outputs, (tensor.Tensor, ops.Tensor, type(None))) else list(outputs)
    for i, s in enumerate(self._output_shapes):
      outputs[i].set_shape(s)
  else:
    outputs = execute.execute(
        signature.name,
        num_outputs=len(signature.output_arg),
        inputs=all_args)
  real_outputs = outputs[:len(self._returns)]
  side_outputs = outputs[len(self._returns):]
  watched_extra_inputs = []
  for t in self._extra_inputs:
    tid = ops.tensor_id(t)
    for t in tape._tape_stack.stack:  # pylint: disable=protected-access
      w = t.value.tensors.get(tid, None)
      if w is not None:
        watched_extra_inputs.append(w)
        break
    else:  # Note: for-else here done on purpose
      watched_extra_inputs.append(t)

  def backward_function_wrapper(*outputs):
    outputs = outputs[len(real_outputs):]
    return self._backward_function(*outputs)

  real_outputs = tape.record_operation(
      real_outputs,
      (args + watched_extra_inputs),
      side_outputs,
      backward_function_wrapper)
  return self._build_call_outputs(self._returns, real_outputs)
Developer: keveman, Project: tensorflow, Lines: 47, Source: function.py
Example 12: _watch_with_tape
def _watch_with_tape(tape, tensor):
  """Wraps a watched Tensor and keeps track of it in the implicit tape."""
  w = _watch_with_tape_internal(tape, tensor)
  if ag_core.isnode(tape):
    tape.value.tensors[ops.tensor_id(tensor)] = w
  return w
Developer: keveman, Project: tensorflow, Lines: 6, Source: tape.py
Example 13: any_tape_has
def any_tape_has(tensor):
  for t in _tape_stack.stack:
    if ops.tensor_id(tensor) in t.value.tensors:
      return True
  return False
Developer: keveman, Project: tensorflow, Lines: 5, Source: tape.py
Example 14: _watch_value_from_tape
def _watch_value_from_tape(tensor):
  for t in tape._tape_stack.stack:  # pylint: disable=protected-access
    w = t.value.tensors.get(tf_ops.tensor_id(tensor), None)
    if w is not None:
      return w
  return tensor
Developer: solaris33, Project: tensorflow, Lines: 6, Source: custom_gradient.py
Example 15: imperative_grad
def imperative_grad(
    target,
    sources,
    output_gradients=None):
  """Computes gradients from the imperatively defined tape on top of the stack.

  Works by filtering the tape, computing how many downstream usages are of each
  tensor and entry, and repeatedly applying backward functions until we have
  gradients for all sources.

  Args:
    target: either a Tensor or list of Tensors to be differentiated.
    sources: list of Tensors for which we want gradients
    output_gradients: if not None, a list of gradient provided for each Target,
      or None if we are to use the target's computed downstream gradient.

  Returns:
    the gradient wrt each of the sources.

  Raises:
    RuntimeError: if something goes wrong.
    ValueError: if there is no sequence of differentiable operations connecting
      a source and any target Tensor. This can happen either if the target is
      not computed based on the source, if the tracing was set up incorrectly,
      or if only non-differentiable functions of the source were used in the
      computation of target.
  """
  if not tape._tape_stack.stack:  # pylint: disable=protected-access
    raise RuntimeError("Computing a gradient with no tape present")
  bp_tape = tape.pop_tape()
  tensor_to_op, op_to_entry = bp_tape.export()
  # This overwrites the op_to_entry variable, which will release all memory used
  # to keep traces that are irrelevant to the gradient computation we're doing
  # here.
  id_sources = [ops.tensor_id(t) for t in sources]
  tensor_usage_counts, op_to_entry, op_missing_tensor = _prepare_backprop(
      target, tensor_to_op, op_to_entry, id_sources)
  ready_ops = _initialize_backprop_stack(op_to_entry, op_missing_tensor)
  gradients = _initial_gradients(target, output_gradients,
                                 tensor_usage_counts)
  gradients_size = dict()
  # Now exhaust the backprop stack
  while ready_ops:
    op = ready_ops.pop()
    op_trace = op_to_entry.pop(op)
    out_gradients = [gradients.pop(t, None) for t in op_trace.output_ids]
    for i in range(len(out_gradients)):
      if out_gradients[i] is None:
        # TODO(apassos) this should be in the right device
        none_indices = _grad_fn_accepts_none_for_indices.get(
            op_trace.op_type, None)
        if none_indices is None or i not in none_indices:
          out_gradients[i] = array_ops.zeros(
              *op_trace.output_shape_and_dtype[i])
      else:
        out_gradients[i] = _aggregate_grads(out_gradients[i])
    in_gradients = op_trace.backward_function(
        *(out_gradients + op_trace.side_outputs))
    in_gradients = ([in_gradients]
                    if isinstance(in_gradients, (ops.Tensor,
                                                 ops.IndexedSlices,
                                                 type(None)))
                    else in_gradients)
    for i, t in enumerate(op_trace.input_ids):
      if in_gradients[i] is not None:
        _add_new_grads(gradients, gradients_size, t, in_gradients[i])
      if tensor_usage_counts.get(t, 0) > 0:
        tensor_usage_counts[t] -= 1
        if (t in tensor_to_op
            and tensor_usage_counts[t] == 0
            and t not in id_sources):
          in_op = tensor_to_op[t]
          if in_op is None:
            continue
          if op_missing_tensor.get(in_op, 0) > 0:
            op_missing_tensor[in_op] -= 1
            if op_missing_tensor.get(in_op, 0) == 0:
              ready_ops.append(in_op)
  result = []
  for i, s in enumerate(sources):
    g = gradients.get(ops.tensor_id(s), None)
    if g is None:
      # TODO(apassos): figure out a way to summarize why sources and targets are
      # not connected.
      raise ValueError("There is no sequence of operations connecting source "
                       "tensor %s (%s) to any of the target Tensors. This is "
                       "commonly caused by the tape not recording all "
                       "operations in the forward pass or if by mistake a "
                       "source was only used in non-differentiable operations."
                       % (i, s))
    result.append(_aggregate_grads(g))
  return result
Developer: Crazyonxh, Project: tensorflow, Lines: 93, Source: backprop.py
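For context, a rough sketch of how a caller drives imperative_grad, modeled on the gradient helpers in the same source tree (push_new_tape and watch are assumed tape-module internals from that era, not a stable public API):

from tensorflow.python.eager import tape
from tensorflow.python.framework import ops

def grad_of(f, *args):
  """Sketch: differentiate f wrt every argument using the imperative tape."""
  tape.push_new_tape()                     # start recording
  args = [ops.convert_to_tensor(a) for a in args]
  for a in args:
    tape.watch(a)                          # mark each argument as a source
  result = f(*args)
  # imperative_grad pops the tape itself and replays it backwards.
  return imperative_grad(result, list(args))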
Example 16: mut_add
def mut_add(implicit_tape):
  # Note: `tape`, `tensor`, and `g` are free variables from an enclosing
  # scope; this snippet is an inner helper, not a standalone function.
  resource_variable = tape.value.variables[ops.tensor_id(tensor)]
  implicit_tape.gradients.append((g, resource_variable))
  return implicit_tape
Developer: solaris33, Project: tensorflow, Lines: 4, Source: tape.py
Note: The tensorflow.python.framework.ops.tensor_id examples in this article were compiled by 纯净天空 from source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets were selected from open-source projects contributed by their developers; copyright of the source code remains with the original authors, and distribution and use are governed by each project's License. Do not reproduce without permission.