This article collects typical usage examples of the Python function tensorflow.python.eager.context.in_eager_mode, gathered from open-source projects. If you have been wondering what in_eager_mode does, how to call it, or what real-world usage looks like, the hand-picked examples below should help.
A total of 20 code examples of the in_eager_mode function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
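Before the collected examples, here is a minimal sketch of the typical branching pattern, assuming a TensorFlow 1.x build in which this private module still exposes in_eager_mode (later releases renamed it to context.executing_eagerly()); the helper name evaluate is illustrative and not part of any example below:

import tensorflow as tf
from tensorflow.python.eager import context

def evaluate(tensor):
  # Eager tensors already hold concrete values; graph tensors are symbolic
  # and must be run inside a Session to produce a value.
  if context.in_eager_mode():
    return tensor.numpy()
  with tf.Session() as sess:
    return sess.run(tensor)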
Example 1: add_variable
def add_variable(self, name, shape=None, dtype=None, initializer=None):
  """***Only for use by descendants of Metric***."""
  if self._built:
    raise RuntimeError("Can't call add_variable() except in build().")
  if context.in_eager_mode():
    collections = None
  else:
    if self._use_global_variables:
      collections = [ops.GraphKeys.GLOBAL_VARIABLES]
    else:
      collections = [ops.GraphKeys.LOCAL_VARIABLES]
    collections += [ops.GraphKeys.METRIC_VARIABLES]
  # Variables are Checkpointable dependencies of Metrics regardless of the
  # global/local distinction. Users can avoid saving variables by not adding a
  # dependency on the Metric.
  v = self._add_variable_with_custom_getter(
      name=name,
      shape=shape,
      dtype=dtype,
      initializer=initializer,
      trainable=False,
      collections=collections,
      use_resource=True,
      getter=variable_scope.get_variable,
      # Raise duplicate variable exceptions from get_variable rather than
      # Checkpointable.
      overwrite=True)
  self._vars.append(v)
  if context.in_eager_mode():
    self._initial_values[v] = v.value()
  return v
Author: QiangCai, Project: tensorflow, Lines: 31, Source file: metrics_impl.py
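As a usage illustration, here is a minimal sketch of a custom metric built on add_variable(), assuming the Metric base class is available as tf.contrib.eager.metrics.Metric in this TensorFlow era; the class name, variable name, and sum-accumulating logic are illustrative and not taken from the example above:

import tensorflow as tf
import tensorflow.contrib.eager as tfe

class SumMetric(tfe.metrics.Metric):
  """Accumulates the sum of every value passed to the metric."""

  def build(self, *args, **kwargs):
    # add_variable() may only be called from build(); the variable is created
    # non-trainable and becomes a checkpoint dependency of the metric.
    self.total = self.add_variable(
        name="total", shape=(), dtype=tf.float32,
        initializer=tf.zeros_initializer())

  def call(self, values):
    self.total.assign_add(tf.reduce_sum(values))

  def result(self):
    return self.total

Calling the metric object (for example m = SumMetric(); m([1., 2., 3.])) triggers build() on first use, and m.result() then reads the accumulated value.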
Example 2: infer_step
def infer_step(recent_output, recent_logits, unused_loss):
  """Inference step."""
  if not context.in_eager_mode():
    recent_output.set_shape([None, None, None, 1])
  padded = tf.pad(recent_output, [[0, 0], [0, 1], [0, 0], [0, 0]])
  features["targets"] = padded
  # This is inefficient in that it generates samples at all timesteps,
  # not just the last one, except if target_modality is pointwise.
  samples, logits, losses = self.sample(features)
  # Concatenate the already-generated recent_output with last timestep
  # of the newly-generated samples.
  if target_modality.top_is_pointwise:
    cur_sample = samples[:, -1, :, :]
  else:
    cur_sample = samples[:,
                         common_layers.shape_list(recent_output)[1], :, :]
  cur_sample = tf.to_int64(tf.expand_dims(cur_sample, axis=1))
  samples = tf.concat([recent_output, cur_sample], axis=1)
  if not context.in_eager_mode():
    samples.set_shape([None, None, None, 1])
  # Assuming we have one shard for logits.
  logits = tf.concat([recent_logits, logits[:, -1:]], 1)
  loss = sum([l for l in losses.values() if l is not None])
  return samples, logits, loss
Author: AranKomat, Project: tensor2tensor, Lines: 25, Source file: t2t_model.py
Example 3: testVariablesAcrossGraphs
def testVariablesAcrossGraphs(self):
  optimizer = momentum_lib.MomentumOptimizer(0.01, 0.5)
  with ops.Graph().as_default():
    var0 = resource_variable_ops.ResourceVariable(
        [1.0, 2.0], dtype=dtypes.float32, name="var0")
    var1 = resource_variable_ops.ResourceVariable(
        [3.0, 4.0], dtype=dtypes.float32, name="var1")
    if context.in_eager_mode():
      loss = lambda: math_ops.reduce_sum(var0 + var1)
    else:
      loss = math_ops.reduce_sum(var0 + var1)
    optimizer.minimize(loss)
    optimizer_variables = optimizer.variables()
    self.assertStartsWith(optimizer_variables[0].name, "var0")
    self.assertStartsWith(optimizer_variables[1].name, "var1")
    self.assertEquals(2, len(optimizer_variables))

  with ops.Graph().as_default():
    var2 = resource_variable_ops.ResourceVariable(
        [1.0, 2.0], dtype=dtypes.float32, name="var2")
    var3 = resource_variable_ops.ResourceVariable(
        [3.0, 4.0], dtype=dtypes.float32, name="var3")
    if context.in_eager_mode():
      loss = lambda: math_ops.reduce_sum(var2 + var3)
    else:
      loss = math_ops.reduce_sum(var2 + var3)
    optimizer.minimize(loss)
    optimizer_variables = optimizer.variables()
    self.assertStartsWith(optimizer_variables[0].name, "var2")
    self.assertStartsWith(optimizer_variables[1].name, "var3")
    self.assertEquals(2, len(optimizer_variables))
Author: SylChan, Project: tensorflow, Lines: 31, Source file: momentum_test.py
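The branch on the loss in this test reflects a real API difference: with eager execution, Optimizer.minimize() expects a callable so the loss can be re-evaluated under a gradient tape, while in graph mode it takes a loss tensor. A minimal sketch of the same pattern outside a test harness, using the same internal modules as the example (the variable and learning-rate values are illustrative):

from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.training import momentum as momentum_lib

var = resource_variable_ops.ResourceVariable(
    [1.0, 2.0], dtype=dtypes.float32, name="w")
optimizer = momentum_lib.MomentumOptimizer(0.01, 0.5)

if context.in_eager_mode():
  # Eager: pass a callable so minimize() can re-evaluate the loss under a tape.
  loss = lambda: math_ops.reduce_sum(var * var)
else:
  # Graph: pass a tensor; gradients are derived from the static graph.
  loss = math_ops.reduce_sum(var * var)
optimizer.minimize(loss)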
Example 4: test_zero_padding_1d
def test_zero_padding_1d(self):
  num_samples = 2
  input_dim = 2
  num_steps = 5
  shape = (num_samples, num_steps, input_dim)
  inputs = np.ones(shape)

  # basic test
  with self.test_session(use_gpu=True):
    testing_utils.layer_test(
        keras.layers.ZeroPadding1D,
        kwargs={'padding': 2},
        input_shape=inputs.shape)
    testing_utils.layer_test(
        keras.layers.ZeroPadding1D,
        kwargs={'padding': (1, 2)},
        input_shape=inputs.shape)

  # correctness test
  with self.test_session(use_gpu=True):
    layer = keras.layers.ZeroPadding1D(padding=2)
    layer.build(shape)
    output = layer(keras.backend.variable(inputs))
    if context.in_eager_mode():
      np_output = output.numpy()
    else:
      np_output = keras.backend.eval(output)
    for offset in [0, 1, -1, -2]:
      np.testing.assert_allclose(np_output[:, offset, :], 0.)
    np.testing.assert_allclose(np_output[:, 2:-2, :], 1.)

    layer = keras.layers.ZeroPadding1D(padding=(1, 2))
    layer.build(shape)
    output = layer(keras.backend.variable(inputs))
    if context.in_eager_mode():
      np_output = output.numpy()
    else:
      np_output = keras.backend.eval(output)
    for left_offset in [0]:
      np.testing.assert_allclose(np_output[:, left_offset, :], 0.)
    for right_offset in [-1, -2]:
      np.testing.assert_allclose(np_output[:, right_offset, :], 0.)
    np.testing.assert_allclose(np_output[:, 1:-2, :], 1.)
    layer.get_config()

  # test incorrect use
  with self.assertRaises(ValueError):
    keras.layers.ZeroPadding1D(padding=(1, 1, 1))
  with self.assertRaises(ValueError):
    keras.layers.ZeroPadding1D(padding=None)
Author: QiangCai, Project: tensorflow, Lines: 50, Source file: convolutional_test.py
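The branch between output.numpy() and keras.backend.eval() is a recurring pattern in these tests; a small hypothetical helper (not part of the example above) keeps such code mode-agnostic:

import tensorflow as tf
from tensorflow.python.eager import context

def to_numpy(tensor):
  # Eager tensors hold concrete values and expose .numpy();
  # graph tensors must be evaluated through the Keras backend session.
  if context.in_eager_mode():
    return tensor.numpy()
  return tf.keras.backend.eval(tensor)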
Example 5: test_averagepooling_2d
def test_averagepooling_2d(self):
  testing_utils.layer_test(
      keras.layers.AveragePooling2D,
      kwargs={'strides': (2, 2),
              'padding': 'same',
              'pool_size': (2, 2)},
      input_shape=(3, 5, 6, 4))
  testing_utils.layer_test(
      keras.layers.AveragePooling2D,
      kwargs={'strides': (2, 2),
              'padding': 'valid',
              'pool_size': (3, 3)},
      input_shape=(3, 5, 6, 4))

  # This part of the test can only run on GPU but doesn't appear
  # to be properly assigned to a GPU when running in eager mode.
  if not context.in_eager_mode():
    # Only runs on GPU with CUDA, channels_first is not supported on CPU.
    # TODO(b/62340061): Support channels_first on CPU.
    if test.is_gpu_available(cuda_only=True):
      testing_utils.layer_test(
          keras.layers.AveragePooling2D,
          kwargs={
              'strides': (1, 1),
              'padding': 'valid',
              'pool_size': (2, 2),
              'data_format': 'channels_first'
          },
          input_shape=(3, 4, 5, 6))
Author: QiangCai, Project: tensorflow, Lines: 29, Source file: pooling_test.py
Example 6: assert_integer
def assert_integer(x, message=None, name=None):
  """Assert that `x` is of integer dtype.

  Example of adding a dependency to an operation:

  ```python
  with tf.control_dependencies([tf.assert_integer(x)]):
    output = tf.reduce_sum(x)
  ```

  Args:
    x: `Tensor` whose basetype is integer and is not quantized.
    message: A string to prefix to the default message.
    name: A name for this operation (optional). Defaults to "assert_integer".

  Raises:
    TypeError: If `x.dtype` is anything other than non-quantized integer.

  Returns:
    A `no_op` that does nothing. Type can be determined statically.
  """
  message = message or ''
  with ops.name_scope(name, 'assert_integer', [x]):
    x = ops.convert_to_tensor(x, name='x')
    if not x.dtype.is_integer:
      if context.in_eager_mode():
        name = 'tensor'
      else:
        name = x.name
      err_msg = (
          '%s Expected "x" to be integer type. Found: %s of dtype %s'
          % (message, name, x.dtype))
      raise TypeError(err_msg)

    return control_flow_ops.no_op('statically_determined_was_integer')
Author: QiangCai, Project: tensorflow, Lines: 35, Source file: check_ops.py
Example 7: _defun_internal
def _defun_internal(name, func, args, kwds):
  """Defines and returns graph-mode version of func."""
  graph_key = ops.get_default_graph()._graph_key  # pylint: disable=protected-access
  with context.graph_mode():
    captures = {}
    tmp_graph = CapturingGraph(captures)
    # Inherit the graph key, since this is used for matching variables in
    # optimizers.
    tmp_graph._graph_key = graph_key  # pylint: disable=protected-access
    # Copy the graph collections to ensure summaries and other things work. This
    # lets the function access (but not mutate) collections of the containing
    # graph, such as the global step and the summary writer collections.
    curr_graph = ops.get_default_graph()
    for collection in curr_graph.collections:
      tmp_graph.get_collection_ref(collection)[:] = curr_graph.get_collection(
          collection)
    with tmp_graph.as_default():
      func_inputs = _get_defun_inputs(args)

      with capture_tensors(captures):
        this_tape = tape.push_new_tape()
        try:
          func_outputs = func(*func_inputs, **kwds)
        finally:
          tape.pop_tape(this_tape)
        variables = this_tape.watched_variables()

        # Returning a closed-over tensor as an output does not trigger a
        # call to convert_to_tensor, so we manually capture all such tensors.
        outputs_list = _flatten(func_outputs)
        func_def_outputs = [
            _convert_to_graph_tensor(x) for x in outputs_list if x is not None
        ]

      ids = list(sorted(captures.keys()))
      if ids:
        extra_inputs, extra_placeholders = zip(* [captures[x] for x in ids])
      else:
        extra_inputs = []
        extra_placeholders = []
      output_shapes = tuple(
          x.shape if isinstance(x, ops.Tensor) else None
          for x in outputs_list)

  flat_inputs = [x for x in nest.flatten(func_inputs)
                 if isinstance(x, ops.Tensor)]
  all_inputs = flat_inputs + list(extra_placeholders)
  all_ignored_ops = frozenset(x.op for x in all_inputs)
  fname = _inference_name(name)
  operations = tuple(x for x in tmp_graph.get_operations()
                     if x not in all_ignored_ops)
  # Register any other functions defined in the graph
  # TODO(ashankar): Oh lord, forgive me for this lint travesty.
  if context.in_eager_mode():
    for f in tmp_graph._functions.values():  # pylint: disable=protected-access
      # TODO(ashankar): What about the gradient registry?
      _register(f._c_func)  # pylint: disable=protected-access
  return GraphModeFunction(
      fname, all_inputs, extra_inputs, tmp_graph, operations, func_def_outputs,
      func_outputs, output_shapes, variables)
Author: ChengYuXiang, Project: tensorflow, Lines: 60, Source file: function.py
Example 8: start_queue_runners
def start_queue_runners(self, sess, queue_runners=None):
  """Start threads for `QueueRunners`.

  Note that the queue runners collected in the graph key `QUEUE_RUNNERS`
  are already started automatically when you create a session with the
  supervisor, so unless you have non-collected queue runners to start
  you do not need to call this explicitly.

  Args:
    sess: A `Session`.
    queue_runners: A list of `QueueRunners`. If not specified, we'll use the
      list of queue runners gathered in the graph under the key
      `GraphKeys.QUEUE_RUNNERS`.

  Returns:
    The list of threads started for the `QueueRunners`.

  Raises:
    RuntimeError: If called with eager execution enabled.

  @compatibility(eager)
  Queues are not compatible with eager execution. To ingest data when eager
  execution is enabled, use the `tf.data` API.
  @end_compatibility
  """
  if context.in_eager_mode():
    raise RuntimeError("Queues are not compatible with eager execution.")

  if queue_runners is None:
    queue_runners = self._graph.get_collection(ops.GraphKeys.QUEUE_RUNNERS)
  threads = []
  for qr in queue_runners:
    threads.extend(qr.create_threads(sess, coord=self._coord, daemon=True,
                                     start=True))
  return threads
Author: AbhinavJain13, Project: tensorflow, Lines: 34, Source file: supervisor.py
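The compatibility note in the docstring points to tf.data as the eager-mode replacement for queue runners; a brief sketch of that alternative, assuming the tf.contrib.eager (tfe) API of the same TensorFlow era:

import tensorflow as tf
import tensorflow.contrib.eager as tfe

tfe.enable_eager_execution()

# No session, supervisor, or queue-runner threads are involved;
# the dataset is iterated directly like a Python iterable.
dataset = tf.data.Dataset.range(4)
for element in tfe.Iterator(dataset):
  print(element)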
Example 9: losses
def losses(self):
  """Retrieve the network's losses.

  Will only include losses that are either
  unconditional, or conditional on inputs to this model
  (e.g. will not include losses that depend on tensors
  that aren't inputs to this model).

  Returns:
    A list of loss tensors.
  """
  losses = []
  if context.in_eager_mode():
    for layer in self.layers:
      losses += layer.losses
    return losses

  for layer in self.layers:
    losses += layer.losses

  relevant_inputs = []
  for i in range(len(self._inbound_nodes)):
    inputs = self.get_input_at(i)
    if isinstance(inputs, list):
      relevant_inputs += inputs
    else:
      relevant_inputs.append(inputs)
  reachable = layers_util.get_reachable_from_inputs(relevant_inputs, losses)
  relevant_conditional_losses = [x for x in losses if x in reachable]
  unconditional_losses = [
      x for x in losses if x._unconditional_loss]  # pylint: disable=protected-access
  return list(set(
      relevant_conditional_losses + unconditional_losses + self._losses))
Author: japrogramer, Project: tensorflow, Lines: 33, Source file: network.py
Example 10: decorator
def decorator(hp_fn, registration_name=None):
  """Registers & returns hp_fn with registration_name or default name."""
  hp_name = registration_name or default_name(hp_fn)
  if hp_name in _HPARAMS and not context.in_eager_mode():
    raise LookupError("HParams set %s already registered." % hp_name)
  _HPARAMS[hp_name] = hp_fn
  return hp_fn
Author: chqiwang, Project: tensor2tensor, Lines: 7, Source file: registry.py
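In tensor2tensor this inner decorator backs the public registry.register_hparams entry point; a hedged usage sketch (the hparams-set name and the field being overridden are illustrative):

from tensor2tensor.models import transformer
from tensor2tensor.utils import registry

@registry.register_hparams
def transformer_my_tiny():
  """A hypothetical hparams set derived from transformer_base."""
  hparams = transformer.transformer_base()
  hparams.hidden_size = 64
  return hparams

# Once registered, the set can be looked up by name, e.g. via registry.hparams("transformer_my_tiny").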
Example 11: _convert_to_graph_constant
def _convert_to_graph_constant(value, dtype=None, name=None, as_ref=False):
  """Captures a tfe Tensor while building a graph mode function.

  Creates a placeholder to pass the tensor as an argument.

  Arguments:
    value: A tfe.Tensor object
    dtype: The datatype of the value produced by the node in the graph.
    name: Name of the node in the graph.
    as_ref: Ignored (required by register_tensor_conversion_function).

  Returns:
    A placeholder which will, at runtime, have the value of this tensor.

  Raises:
    ValueError: if called outside a defun context.
  """
  if context.in_eager_mode():
    return value
  _ = as_ref
  tensor_map = _scoped_captures.tensors
  if tensor_map is None:
    raise ValueError(
        "Trying to use tfe.Tensor objects in a graph outside graph mode. "
        "To build a graph use tfe.defun or tfe.make_template.")
  captured_value = tensor_map.get(ops.tensor_id(value), None)
  if captured_value is None:
    captured_value = graph_placeholder(
        dtype=dtype or value.dtype, shape=value.shape, name=name)
    if captured_value.dtype == dtypes.resource:
      captured_value._handle_data = value._handle_data  # pylint: disable=protected-access
    tensor_map[ops.tensor_id(value)] = (value, captured_value)
  else:
    captured_value = captured_value[1]
  return captured_value
Author: chdinh, Project: tensorflow, Lines: 35, Source file: function.py
Example 12: zero_state
def zero_state(self, batch_size, dtype):
  """Return zero-filled state tensor(s).

  Args:
    batch_size: int, float, or unit Tensor representing the batch size.
    dtype: the data type to use for the state.

  Returns:
    If `state_size` is an int or TensorShape, then the return value is a
    `N-D` tensor of shape `[batch_size, state_size]` filled with zeros.

    If `state_size` is a nested list or tuple, then the return value is
    a nested list or tuple (of the same structure) of `2-D` tensors with
    the shapes `[batch_size, s]` for each s in `state_size`.
  """
  # Try to use the last cached zero_state. This is done to avoid recreating
  # zeros, especially when eager execution is enabled.
  state_size = self.state_size
  is_eager = context.in_eager_mode()
  if is_eager and hasattr(self, "_last_zero_state"):
    (last_state_size, last_batch_size, last_dtype,
     last_output) = getattr(self, "_last_zero_state")
    if (last_batch_size == batch_size and
        last_dtype == dtype and
        last_state_size == state_size):
      return last_output
  with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
    output = _zero_state_tensors(state_size, batch_size, dtype)
  if is_eager:
    self._last_zero_state = (state_size, batch_size, dtype, output)
  return output
Author: japrogramer, Project: tensorflow, Lines: 31, Source file: rnn_cell_impl.py
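The caching only matters under eager execution, where zero_state() may be called for every batch rather than once per graph; a brief sketch of the effect, assuming eager execution is already enabled and a standard tf.nn.rnn_cell cell:

import tensorflow as tf

cell = tf.nn.rnn_cell.BasicLSTMCell(num_units=4)

# The first call materializes the zero tensors and, in eager mode,
# remembers them in cell._last_zero_state.
state_a = cell.zero_state(batch_size=8, dtype=tf.float32)

# Same state_size/batch_size/dtype: the cached tensors are returned
# instead of new zeros being created.
state_b = cell.zero_state(batch_size=8, dtype=tf.float32)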
Example 13: testMultiIONetworkbuilding
def testMultiIONetworkbuilding(self):
  input_a = network_layers.Input(shape=(32,))
  input_b = network_layers.Input(shape=(16,))
  a = core_layers.Dense(16)(input_a)

  class AddLayer(base_layers.Layer):

    def call(self, inputs):
      return inputs[0] + inputs[1]

    def compute_output_shape(self, input_shape):
      return input_shape[0]

  c = AddLayer()([a, input_b])  # pylint: disable=not-callable
  c = core_layers.Dense(2)(c)

  network = network_layers.GraphNetwork([input_a, input_b], [a, c])
  if context.in_eager_mode():
    a_val = constant_op.constant(
        np.random.random((10, 32)).astype('float32'))
    b_val = constant_op.constant(
        np.random.random((10, 16)).astype('float32'))
    outputs = network([a_val, b_val])
    self.assertEqual(len(outputs), 2)
    self.assertEqual(outputs[0].shape.as_list(), [10, 16])
    self.assertEqual(outputs[1].shape.as_list(), [10, 2])
Author: ChengYuXiang, Project: tensorflow, Lines: 26, Source file: network_test.py
Example 14: _convert_to_graph_tensor
def _convert_to_graph_tensor(value, dtype=None, name=None, as_ref=False):
  """Captures a Tensor while building a graph mode function.

  Arguments:
    value: A Tensor object.
    dtype: The datatype of the value produced by the node in the graph.
    name: Name of the node in the graph.
    as_ref: Ignored (required by register_tensor_conversion_function).

  Returns:
    Returns a constant (the current value of the tensor) if capturing
    is not enabled. A placeholder which will have the value of the
    tensor at runtime otherwise.
  """
  if context.in_eager_mode():
    return value
  _ = as_ref
  tensor_map = _scoped_captures.tensors
  if tensor_map is None:
    # Capturing is not enabled.
    return constant_op.constant(value.numpy())
  captured_value = tensor_map.get(ops.tensor_id(value), None)
  if captured_value is None:
    captured_value = graph_placeholder(
        dtype=dtype or value.dtype, shape=value.shape, name=name)
    if captured_value.dtype == dtypes.resource:
      captured_value._handle_data = value._handle_data  # pylint: disable=protected-access
    tensor_map[ops.tensor_id(value)] = (value, captured_value)
  else:
    captured_value = captured_value[1]
  tape.record_operation("captured_value", [captured_value], [value], [],
                        lambda x: x)
  return captured_value
Author: Mazecreator, Project: tensorflow, Lines: 33, Source file: function.py
Example 15: losses
def losses(self):
  """Retrieve the network's losses.

  Will only include losses that are either
  unconditional, or conditional on inputs to this model
  (e.g. will not include losses that depend on tensors
  that aren't inputs to this model).

  Returns:
    A list of loss tensors.
  """
  losses = []
  if context.in_eager_mode():
    for layer in self.layers:
      losses += layer.losses
    return losses

  # Retrieve losses for all internal layers.
  for layer in self.layers:
    if hasattr(layer, 'losses'):
      # Collect losses that are dependent on inputs
      # that are part of the model.
      for node_index, node in enumerate(layer._inbound_nodes):  # pylint: disable=protected-access
        node_key = _make_node_key(layer.name, node_index)
        if node_key in self._network_nodes:
          # The model owns this layer node.
          inputs = node.input_tensors
          losses += layer.get_losses_for(inputs)
      # Collect unconditional losses.
      losses += layer.get_losses_for(None)
  # Add any potential unconditional model-level loss.
  losses += self.get_losses_for(None)
  return losses
Author: ChengYuXiang, Project: tensorflow, Lines: 33, Source file: network.py
Example 16: _get_weights
def _get_weights(self, hidden_dim=None):
  """Create or get concatenated embedding or softmax variable.

  Args:
    hidden_dim: dim of the variable. Defaults to self._body_input_depth

  Returns:
    a list of self._num_shards Tensors.
  """
  if hidden_dim is None:
    hidden_dim = self._body_input_depth
  num_shards = self._model_hparams.symbol_modality_num_shards
  shards = []
  for i in xrange(num_shards):
    shard_size = (self._vocab_size // num_shards) + (
        1 if i < self._vocab_size % num_shards else 0)
    var_name = "weights_%d" % i
    shards.append(
        tf.get_variable(
            var_name, [shard_size, hidden_dim],
            initializer=tf.random_normal_initializer(0.0, hidden_dim**-0.5)))
  if num_shards == 1:
    ret = shards[0]
  else:
    ret = tf.concat(shards, 0)
  # Convert ret to tensor.
  if not context.in_eager_mode():
    ret = eu.convert_gradient_to_tensor(ret)
  return ret
Author: AranKomat, Project: tensor2tensor, Lines: 29, Source file: modalities.py
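The shard sizes above split the vocabulary as evenly as possible across shards; a quick worked sketch of that arithmetic with illustrative numbers (not taken from the example):

vocab_size, num_shards = 10, 3
shard_sizes = [
    (vocab_size // num_shards) + (1 if i < vocab_size % num_shards else 0)
    for i in range(num_shards)
]
# shard_sizes == [4, 3, 3]: the first (vocab_size % num_shards) shards each get
# one extra row, and the sizes always sum back to vocab_size.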
Example 17: variables
def variables(self):
  """A list of variables which encode the current state of `Optimizer`.

  Includes slot variables and additional global variables created by the
  optimizer in the current default graph.

  Returns:
    A list of variables.
  """
  executing_eagerly = context.in_eager_mode()
  current_graph = ops.get_default_graph()

  def _from_current_graph(variable):
    if executing_eagerly:
      # No variable.op in eager mode. We don't expect lots of eager graphs,
      # but behavior should be consistent with graph mode.
      return variable._container_prefix == current_graph._container_prefix  # pylint: disable=protected-access
    else:
      return variable.op.graph is current_graph

  optimizer_variables = [v for v in self._non_slot_variables()
                         if _from_current_graph(v)]
  for _, variable_dict in self._slots.items():
    for _, slot_for_variable in variable_dict.items():
      if _from_current_graph(slot_for_variable):
        optimizer_variables.append(slot_for_variable)
  # Sort variables by name so that the return is deterministic.
  return sorted(optimizer_variables, key=lambda v: v.name)
Author: SylChan, Project: tensorflow, Lines: 28, Source file: optimizer.py
Example 18: __init__
def __init__(self, dataset):
  """Creates a new iterator over the given dataset.

  For example:
  ```python
  dataset = tf.contrib.data.Dataset.range(4)
  for x in Iterator(dataset):
    print(x)
  ```

  Args:
    dataset: A `tf.contrib.data.Dataset` object.

  Raises:
    RuntimeError: When invoked without eager execution enabled.
  """
  if not context.in_eager_mode():
    raise RuntimeError(
        "{} objects only make sense when eager execution is enabled".format(
            type(self)))
  ds_variant = dataset._as_variant_tensor()  # pylint: disable=protected-access
  self._output_types = dataset.output_types
  self._flat_output_types = nest.flatten(dataset.output_types)
  self._flat_output_shapes = nest.flatten(dataset.output_shapes)
  self._resource = gen_dataset_ops.iterator(
      container="",
      shared_name=_iterator_shared_name(),
      output_types=self._flat_output_types,
      output_shapes=self._flat_output_shapes)
  gen_dataset_ops.make_iterator(ds_variant, self._resource)
Author: Crazyonxh, Project: tensorflow, Lines: 31, Source file: datasets.py
Example 19: run_evaluation
def run_evaluation(init_op, call_op, results_op, sess=None):
  """Convenience method for running the ops returned by evaluate_on_dataset.

  Args:
    init_op: An op that initializes/resets evaluation state.
    call_op: An op that updates evaluation state on a mini-batch of examples.
      Must generate an tf.errors.OutOfRangeError when done.
    results_op: A dictionary of tensors that compute the final evaluation
      results from the evaluation state.
    sess: The Session to run the evaluation in. Defaults to the default
      Session.

  Returns:
    A dictionary of values, parallel to results_op.

  Raises:
    RuntimeError: if eager execution is enabled.

  @compatibility(eager)
  Only for graph execution.
  @end_compatibility
  """
  if context.in_eager_mode():
    raise RuntimeError("Evaluator.run_evaluation() not supported when "
                       "eager execution is enabled.")
  sess = sess or ops.get_default_session()
  sess.run(init_op)
  try:
    while True:
      sess.run(call_op)
  except errors_impl.OutOfRangeError:
    pass
  return sess.run(results_op)
Author: ChengYuXiang, Project: tensorflow, Lines: 33, Source file: evaluator.py
Example 20: global_step
def global_step(sess, global_step_tensor):
  """Small helper to get the global step.

  ```python
  # Creates a variable to hold the global_step.
  global_step_tensor = tf.Variable(10, trainable=False, name='global_step')
  # Creates a session.
  sess = tf.Session()
  # Initializes the variable.
  sess.run(global_step_tensor.initializer)
  print('global_step: %s' % tf.train.global_step(sess, global_step_tensor))

  global_step: 10
  ```

  Args:
    sess: A TensorFlow `Session` object.
    global_step_tensor: `Tensor` or the `name` of the operation that contains
      the global step.

  Returns:
    The global step value.
  """
  if context.in_eager_mode():
    return int(global_step_tensor.numpy())
  return int(sess.run(global_step_tensor))
Author: Mazecreator, Project: tensorflow, Lines: 25, Source file: training_util.py
Note: The tensorflow.python.eager.context.in_eager_mode examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and redistribution or use should follow the license of the corresponding project. Do not republish without permission.