This page collects typical usage examples of the Python function tensorflow.python.framework.ops.get_default_session. If you are unsure what get_default_session does, how to call it, or simply want working examples, the curated code samples below should help.
The following shows 20 code examples of get_default_session, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code samples.
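For orientation, here is a minimal, hedged sketch (not taken from the examples below) of how the default-session mechanism behaves. It assumes the tf.compat.v1-style API with eager execution disabled; the public alias tf.get_default_session resolves to the same thread-local stack that ops.get_default_session queries.

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

x = tf.constant(3.0)

# Outside any `with Session()` / `as_default()` block there is no default session.
assert tf.get_default_session() is None

with tf.Session() as sess:
    # Entering the `with` block installs `sess` as the thread-local default,
    # which is exactly what ops.get_default_session() returns.
    assert tf.get_default_session() is sess
    print(tf.get_default_session().run(x))  # 3.0

# Leaving the block pops the default session again.
assert tf.get_default_session() is None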
Example 1: test_logging_trainable

def test_logging_trainable(self):
  with ops.Graph().as_default() as g, self.test_session(g):
    var = variables.Variable(constant_op.constant(42.0), name='foo')
    var.initializer.run()
    cof = constant_op.constant(1.0)
    loss = math_ops.subtract(
        math_ops.multiply(var, cof), constant_op.constant(1.0))
    train_step = gradient_descent.GradientDescentOptimizer(0.5).minimize(loss)
    ops.get_default_session().run(train_step)
    self._run_monitor(learn.monitors.LoggingTrainable('foo'))
    self.assertRegexpMatches(str(self.logged_message), var.name)

Developer: AliMiraftab, Project: tensorflow, Lines of code: 11, Source file: monitors_test.py
Example 2: _to_numpy

def _to_numpy(a):
  """Converts tensors to numpy arrays.

  Converts Tensors and EagerTensors to numpy arrays.
  When eager execution is enabled, converts IndexedSlices
  to IndexedSlicesValue with numpy indices/values.

  Args:
    a: any value.

  Returns:
    If a is EagerTensor or Tensor, returns the evaluation of a by calling
    numpy() or run().
    If a is IndexedSlices and eager execution is enabled, calls numpy() on a's
    fields. Otherwise returns a unchanged.
  """
  if isinstance(a, ops.EagerTensor):
    return a.numpy()
  if isinstance(a, ops.Tensor):
    sess = ops.get_default_session()
    return sess.run(a)
  if isinstance(a, ops.IndexedSlices) and context.executing_eagerly():
    return ops.IndexedSlicesValue(
        indices=[x.numpy() for x in a.indices],
        values=[x.numpy() for x in a.values],
        dense_shape=a.dense_shape)
  return a

Developer: aeverall, Project: tensorflow, Lines of code: 27, Source file: gradient_checker_v2.py
Example 3: _to_numpy

def _to_numpy(a):
  """Converts Tensors, EagerTensors, and IndexedSlicesValue to numpy arrays.

  Args:
    a: any value.

  Returns:
    If a is EagerTensor or Tensor, returns the evaluation of a by calling
    numpy() or run(). If a is IndexedSlicesValue, constructs the corresponding
    dense numpy array. Otherwise returns a unchanged.
  """
  if isinstance(a, ops.EagerTensor):
    return a.numpy()
  if isinstance(a, ops.Tensor):
    sess = ops.get_default_session()
    return sess.run(a)
  if isinstance(a, ops.IndexedSlicesValue):
    arr = np.zeros(a.dense_shape)
    assert len(a.values) == len(a.indices), (
        "IndexedSlicesValue has %s value slices but %s indices\n%s" %
        (a.values, a.indices, a))
    for values_slice, index in zip(a.values, a.indices):
      assert 0 <= index < len(arr), (
          "IndexedSlicesValue has invalid index %s\n%s" % (index, a))
      arr[index] += values_slice
    return arr
  return a

Developer: terrytangyuan, Project: tensorflow, Lines of code: 27, Source file: gradient_checker_v2.py
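To make the IndexedSlicesValue branch above concrete, here is a small, self-contained numpy illustration (the values are made up for this example) of how the value slices are accumulated into a dense array, including the repeated-index case where slices for the same row are summed:

import numpy as np

# Hypothetical sparse gradient: rows 0 and 2 of a (4, 2) dense tensor,
# with row 2 appearing twice (its two slices get summed).
dense_shape = (4, 2)
indices = [0, 2, 2]
values = np.array([[1., 1.], [2., 2.], [3., 3.]])

arr = np.zeros(dense_shape)
for values_slice, index in zip(values, indices):
    arr[index] += values_slice

print(arr)
# [[1. 1.]
#  [0. 0.]
#  [5. 5.]
#  [0. 0.]]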
Example 4: start_queue_runners

def start_queue_runners(sess=None, coord=None, daemon=True, start=True,
                        collection=ops.GraphKeys.QUEUE_RUNNERS):
  """Starts all queue runners collected in the graph.

  This is a companion method to `add_queue_runner()`. It just starts
  threads for all queue runners collected in the graph. It returns
  the list of all threads.

  Args:
    sess: `Session` used to run the queue ops. Defaults to the
      default session.
    coord: Optional `Coordinator` for coordinating the started threads.
    daemon: Whether the threads should be marked as `daemons`, meaning
      they don't block program exit.
    start: Set to `False` to only create the threads, not start them.
    collection: A `GraphKey` specifying the graph collection to
      get the queue runners from. Defaults to `GraphKeys.QUEUE_RUNNERS`.

  Returns:
    A list of threads.
  """
  if sess is None:
    sess = ops.get_default_session()
    if not sess:
      raise ValueError("Cannot start queue runners: No default session is "
                       "registered. Use `with sess.as_default()` or pass an "
                       "explicit session to tf.start_queue_runners(sess=sess)")
  with sess.graph.as_default():
    threads = []
    for qr in ops.get_collection(collection):
      threads.extend(qr.create_threads(sess, coord=coord, daemon=daemon,
                                       start=start))
  return threads

Developer: KalraA, Project: tensorflow, Lines of code: 33, Source file: queue_runner.py
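As a usage sketch (tf.compat.v1 API assumed; not part of the excerpt above), calling start_queue_runners without a sess argument only works while a default session is installed, for example inside a `with tf.Session() as sess:` block:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

queue = tf.FIFOQueue(capacity=10, dtypes=[tf.float32])
enqueue_op = queue.enqueue(tf.random.uniform([]))
tf.train.add_queue_runner(tf.train.QueueRunner(queue, [enqueue_op]))

with tf.Session() as sess:          # sess becomes the default session
    coord = tf.train.Coordinator()
    # sess=None here, so the implementation falls back to get_default_session().
    threads = tf.train.start_queue_runners(coord=coord)
    print(sess.run(queue.dequeue()))
    coord.request_stop()
    coord.join(threads)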
Example 5: _prepare

def _prepare(f, xs_dtypes):
  """Return a function that executes 'f'.

  In TF 2.x, this is the same as `f`.
  In TF 1.x, returns a Python function that executes the graph defined by `f`
  in a Session.

  Args:
    f: the function.
    xs_dtypes: dtypes of f's arguments.

  Returns:
    a function that will be evaluated in both graph and eager mode
  """
  if context.executing_eagerly():

    def decorated_eager(*xs_data):
      return f(*map(ops.convert_to_tensor, xs_data))

    return decorated_eager
  xs = [array_ops.placeholder(x_dtype) for x_dtype in xs_dtypes]
  y = f(*xs)
  sess = ops.get_default_session()

  def decorated_graph(*xs_data):
    xs_data = [_to_numpy(a) for a in xs_data]
    return sess.run(y, feed_dict=dict(zip(xs, xs_data)))

  return decorated_graph

Developer: terrytangyuan, Project: tensorflow, Lines of code: 27, Source file: gradient_checker_v2.py
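A minimal sketch of the same build-once, feed-per-call pattern that _prepare uses in graph mode (tf.compat.v1 assumed; the function and names are illustrative and not from gradient_checker_v2):

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

def prepare_square():
    # Build the graph once, capturing the placeholder, the output, and the
    # session that is current (default) at preparation time.
    x_ph = tf.placeholder(tf.float32)
    y = tf.square(x_ph)
    sess = tf.get_default_session()

    def run_square(x_data):
        # Re-run the captured graph on new data, like decorated_graph above.
        return sess.run(y, feed_dict={x_ph: x_data})

    return run_square

with tf.Session():
    f = prepare_square()
    print(f(3.0))  # 9.0
    print(f(4.0))  # 16.0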
Example 6: _operator_and_matrix

def _operator_and_matrix(self, build_info, dtype, use_placeholder):
  sess = ops.get_default_session()
  shape = list(build_info.shape)
  # Either 1 or 2 matrices, depending.
  num_operators = rng.randint(low=1, high=3)
  matrices = [
      linear_operator_test_util.random_positive_definite_matrix(
          shape, dtype, force_well_conditioned=True)
      for _ in range(num_operators)
  ]

  lin_op_matrices = matrices

  if use_placeholder:
    lin_op_matrices = [
        array_ops.placeholder_with_default(
            matrix, shape=None) for matrix in matrices]

  operator = linalg.LinearOperatorComposition(
      [linalg.LinearOperatorFullMatrix(l) for l in lin_op_matrices],
      is_square=True)

  matmul_order_list = list(reversed(matrices))
  mat = matmul_order_list[0]
  for other_mat in matmul_order_list[1:]:
    mat = math_ops.matmul(other_mat, mat)

  return operator, mat

Developer: LongJun123456, Project: tensorflow, Lines of code: 29, Source file: linear_operator_composition_test.py
Example 7: run_restore_ops

def run_restore_ops(self, session=None):
  """Load the name-based training checkpoint using a new `tf.train.Saver`."""
  if session is None and not context.executing_eagerly():
    session = ops.get_default_session()
  with ops.device("/cpu:0"):
    saver_lib.Saver(self._object_saver._global_variable_names()).restore(  # pylint: disable=protected-access
        sess=session, save_path=self._save_path)

Developer: Jackiefan, Project: tensorflow, Lines of code: 7, Source file: checkpointable_utils.py
Example 8: run_evaluation

def run_evaluation(init_op, call_op, results_op, sess=None):
  """Convenience method for running the ops returned by evaluate_on_dataset.

  Args:
    init_op: An op that initializes/resets evaluation state.
    call_op: An op that updates evaluation state on a mini-batch of examples.
      Must generate a tf.errors.OutOfRangeError when done.
    results_op: A dictionary of tensors that compute the final evaluation
      results from the evaluation state.
    sess: The Session to run the evaluation in. Defaults to the default
      Session.

  Returns:
    A dictionary of values, parallel to results_op.

  Raises:
    RuntimeError: if eager execution is enabled.

  @compatibility(eager)
  Only for graph execution.
  @end_compatibility
  """
  if context.in_eager_mode():
    raise RuntimeError("Evaluator.run_evaluation() not supported when "
                       "eager execution is enabled.")
  sess = sess or ops.get_default_session()
  sess.run(init_op)
  try:
    while True:
      sess.run(call_op)
  except errors_impl.OutOfRangeError:
    pass
  return sess.run(results_op)

Developer: ChengYuXiang, Project: tensorflow, Lines of code: 33, Source file: evaluator.py
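The init/call/results structure that run_evaluation drives maps onto the familiar graph-mode loop below (a hedged, generic sketch using a tf.data iterator; the names are illustrative and not taken from the Evaluator class):

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

dataset = tf.data.Dataset.range(5)
iterator = tf.data.make_initializable_iterator(dataset)
next_element = iterator.get_next()

with tf.Session() as sess:
    sess.run(iterator.initializer)            # analogue of init_op
    total = 0
    try:
        while True:
            total += sess.run(next_element)   # analogue of call_op
    except tf.errors.OutOfRangeError:         # dataset exhausted
        pass
    print(total)                              # analogue of results_op; prints 10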
Example 9: save

def save(self, file_prefix, session=None):
  """Save a training checkpoint.

  The saved checkpoint includes variables created by this object and any
  checkpointable objects it depends on at the time `Checkpoint.save()` is
  called.

  Args:
    file_prefix: A prefix to use for the checkpoint filenames
      (/path/to/directory/and_a_prefix). Names are generated based on this
      prefix and `Checkpoint.save_counter`.
    session: The session to evaluate variables in. Ignored when executing
      eagerly. If not provided when graph building, the default session is
      used.

  Returns:
    The full path to the checkpoint.
  """
  in_graph_mode = not context.executing_eagerly()
  if in_graph_mode:
    if session is None:
      session = ops.get_default_session()
    if self._save_counter is None:
      # When graph building, if this is a new save counter variable then it
      # needs to be initialized before assign_add. This is only an issue if
      # restore() has not been called first.
      session.run(self.save_counter.initializer)
  with ops.colocate_with(self.save_counter):
    assign_op = self.save_counter.assign_add(1)
  if in_graph_mode:
    session.run(assign_op)
  return self._saver.save(
      file_prefix=file_prefix,
      checkpoint_number=self.save_counter,
      session=session)

Developer: Jackiefan, Project: tensorflow, Lines of code: 35, Source file: checkpointable_utils.py
Example 10: applyOptimizer

def applyOptimizer(self, opt, dtype, steps=5, is_sparse=False):
  if is_sparse:
    var0 = variables.Variable([[0.0], [0.0]], dtype=dtype)
    var1 = variables.Variable([[0.0], [0.0]], dtype=dtype)
    grads0 = ops.IndexedSlices(
        constant_op.constant([0.1], shape=[1, 1], dtype=dtype),
        constant_op.constant([0]), constant_op.constant([2, 1]))
    grads1 = ops.IndexedSlices(
        constant_op.constant([0.02], shape=[1, 1], dtype=dtype),
        constant_op.constant([1]), constant_op.constant([2, 1]))
  else:
    var0 = variables.Variable([0.0, 0.0], dtype=dtype)
    var1 = variables.Variable([0.0, 0.0], dtype=dtype)
    grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
    grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)

  update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
  variables.global_variables_initializer().run()

  sess = ops.get_default_session()
  v0_val, v1_val = sess.run([var0, var1])
  if is_sparse:
    self.assertAllCloseAccordingToType([[0.0], [0.0]], v0_val)
    self.assertAllCloseAccordingToType([[0.0], [0.0]], v1_val)
  else:
    self.assertAllCloseAccordingToType([0.0, 0.0], v0_val)
    self.assertAllCloseAccordingToType([0.0, 0.0], v1_val)

  # Run Ftrl for a few steps
  for _ in range(steps):
    update.run()

  v0_val, v1_val = sess.run([var0, var1])
  return v0_val, v1_val

Developer: clsung, Project: tensorflow, Lines of code: 34, Source file: ftrl_test.py
Example 11: initialize

def initialize(
    graph=None,  # pylint: disable=redefined-outer-name
    session=None):
  """Initializes summary writing for graph execution mode.

  This helper method provides a higher-level alternative to using
  @{tf.contrib.summary.summary_writer_initializer_op} and
  @{tf.contrib.summary.graph}.

  Most users will also want to call @{tf.train.create_global_step}
  which can happen before or after this function is called.

  Args:
    graph: A @{tf.Graph} or @{tf.GraphDef} to output to the writer.
      This function will not write the default graph by default. When
      writing to an event log file, the associated step will be zero.
    session: So this method can call @{tf.Session.run}. This defaults
      to @{tf.get_default_session}.

  Raises:
    RuntimeError: If in eager mode, or if the current thread has no
      default @{tf.contrib.summary.SummaryWriter}.
    ValueError: If session wasn't passed and no default session exists.
  """
  if context.context().summary_writer_resource is None:
    raise RuntimeError("No default tf.contrib.summary.SummaryWriter found")
  if session is None:
    session = ops.get_default_session()
    if session is None:
      raise ValueError("session must be passed if no default session exists")
  session.run(summary_writer_initializer_op())
  if graph is not None:
    data = _serialize_graph(graph)
    x = array_ops.placeholder(dtypes.string)
    session.run(_graph(x, 0), feed_dict={x: data})

Developer: ChengYuXiang, Project: tensorflow, Lines of code: 35, Source file: summary_ops.py
Example 12: initialize_or_restore

def initialize_or_restore(self, session=None):
  """Run operations to initialize or restore objects in the dependency graph.

  Any objects in the dependency graph which have initializers but are not in
  the checkpoint will have those initializers run, unless those variables are
  being restored by a later call to `tf.train.Checkpoint.restore()`.

  This method has a sibling in `InitializationOnlyStatus` which instead
  initializes variables. That type is returned if no checkpoint is specified
  in `Saver.restore`.

  Args:
    session: The session to run init/restore ops in. If `None`, uses the
      default session.
  """
  if context.executing_eagerly():
    return  # Initialization and restoration ops are run eagerly
  if session is None:
    session = ops.get_default_session()
  all_objects = list_objects(self._root_checkpointable)
  already_initialized_objects = set(
      self._checkpoint.object_by_proto_id.values())
  initializers_for_non_restored_variables = [
      c.initializer for c in all_objects
      if hasattr(c, "initializer")
      and c not in already_initialized_objects
      and (getattr(c, "_update_uid", self._checkpoint.restore_uid - 1)
           < self._checkpoint.restore_uid)]
  self.run_restore_ops(session=session)
  session.run(initializers_for_non_restored_variables)

Developer: Jackiefan, Project: tensorflow, Lines of code: 30, Source file: checkpointable_utils.py
Example 13: _run_monitor

def _run_monitor(self,
                 monitor,
                 num_epochs=3,
                 num_steps_per_epoch=10,
                 pass_max_steps=True):
  if pass_max_steps:
    max_steps = num_epochs * num_steps_per_epoch - 1
  else:
    max_steps = None
  monitor.begin(max_steps=max_steps)
  for epoch in xrange(num_epochs):
    monitor.epoch_begin(epoch)
    should_stop = False
    step = epoch * num_steps_per_epoch
    next_epoch_step = step + num_steps_per_epoch
    while (not should_stop) and (step < next_epoch_step):
      tensors = monitor.step_begin(step)
      output = ops.get_default_session().run(tensors) if tensors else {}
      output = dict(
          zip([t.name if isinstance(t, ops.Tensor) else t for t in tensors],
              output))
      should_stop = monitor.step_end(step=step, output=output)
      monitor.post_step(step=step, session=None)
      step += 1
    monitor.epoch_end(epoch)
  monitor.end()

Developer: AliMiraftab, Project: tensorflow, Lines of code: 26, Source file: monitors_test.py
Example 14: assert_bijective_and_finite

def assert_bijective_and_finite(bijector, x, y, atol=0, rtol=1e-5, sess=None):
  """Assert that forward/inverse (along with jacobians) are inverses and finite.

  It is recommended to use x and y values that are very close to the edge
  of the Bijector's domain.

  Args:
    bijector: A Bijector instance.
    x: np.array of values in the domain of bijector.forward.
    y: np.array of values in the domain of bijector.inverse.
    atol: Absolute tolerance.
    rtol: Relative tolerance.
    sess: TensorFlow session. Defaults to the default session.

  Raises:
    AssertionError: If tests fail.
  """
  sess = sess or ops.get_default_session()

  # These are the incoming points, but people often create a crazy range of
  # values for which these end up being bad, especially in 16bit.
  assert_finite(x)
  assert_finite(y)

  f_x = bijector.forward(x)
  g_y = bijector.inverse(y)

  [
      x_from_x,
      y_from_y,
      ildj_f_x,
      fldj_x,
      ildj_y,
      fldj_g_y,
      f_x_v,
      g_y_v,
  ] = sess.run([
      bijector.inverse(f_x),
      bijector.forward(g_y),
      bijector.inverse_log_det_jacobian(f_x),
      bijector.forward_log_det_jacobian(x),
      bijector.inverse_log_det_jacobian(y),
      bijector.forward_log_det_jacobian(g_y),
      f_x,
      g_y,
  ])

  assert_finite(x_from_x)
  assert_finite(y_from_y)
  assert_finite(ildj_f_x)
  assert_finite(fldj_x)
  assert_finite(ildj_y)
  assert_finite(fldj_g_y)
  assert_finite(f_x_v)
  assert_finite(g_y_v)

  np.testing.assert_allclose(x_from_x, x, atol=atol, rtol=rtol)
  np.testing.assert_allclose(y_from_y, y, atol=atol, rtol=rtol)
  np.testing.assert_allclose(-ildj_f_x, fldj_x, atol=atol, rtol=rtol)
  np.testing.assert_allclose(-ildj_y, fldj_g_y, atol=atol, rtol=rtol)

Developer: 1000sprites, Project: tensorflow, Lines of code: 60, Source file: bijector_test_util.py
Example 15: save

def save(self, file_prefix, checkpoint_number=None, session=None):
  """Save a training checkpoint.

  The saved checkpoint includes variables created by this object and any
  Checkpointable objects it depends on at the time `Saver.save()` is called.

  Args:
    file_prefix: A prefix to use for the checkpoint filenames
      (/path/to/directory/and_a_prefix). Names are generated based on this
      prefix and `checkpoint_number`, if provided.
    checkpoint_number: An integer variable or Tensor, used to number
      checkpoints. Typically this value is saved along with other variables in
      training checkpoints, which will happen automatically if it was created
      by `root_checkpointable` or one of its dependencies (via
      `Checkpointable._add_variable`).
    session: The session to evaluate variables in. Ignored when executing
      eagerly. If not provided when graph building, the default session is
      used.

  Returns:
    The full path to the checkpoint.
  """
  named_variables, graph_proto = _serialize_object_graph(
      self._root_checkpointable)
  if not context.executing_eagerly():
    if session is None:
      session = ops.get_default_session()
    if self._object_graph_feed_tensor is None:
      with ops.device("/cpu:0"):
        self._object_graph_feed_tensor = constant_op.constant(
            "", dtype=dtypes.string)
    object_graph_tensor = self._object_graph_feed_tensor
    feed_additions = {object_graph_tensor: graph_proto.SerializeToString()}
  else:
    session = None
    with ops.device("/cpu:0"):
      object_graph_tensor = constant_op.constant(
          graph_proto.SerializeToString(), dtype=dtypes.string)
    feed_additions = None
  assert _OBJECT_GRAPH_PROTO_KEY not in named_variables
  named_variables[_OBJECT_GRAPH_PROTO_KEY] = _NoRestoreSaveable(
      tensor=object_graph_tensor,
      name=_OBJECT_GRAPH_PROTO_KEY)
  if self._last_save_object_graph != graph_proto:
    if self._last_save_object_graph is not None:
      self._last_save_saver = _copy_saver_with_new_var_list(
          old_saver=self._last_save_saver, new_var_list=named_variables)
    else:
      self._last_save_saver = saver_lib.Saver(var_list=named_variables)
    self._last_save_object_graph = graph_proto
  with ops.device("/cpu:0"):
    save_path = self._last_save_saver.save(
        sess=_SessionWithFeedDictAdditions(
            session=session, feed_additions=feed_additions),
        save_path=file_prefix,
        write_meta_graph=False,
        global_step=checkpoint_number)
  return save_path

Developer: houhaichao830, Project: tensorflow, Lines of code: 58, Source file: checkpointable_utils.py
Example 16: start_queue_runners

def start_queue_runners(sess=None, coord=None, daemon=True, start=True,
                        collection=ops.GraphKeys.QUEUE_RUNNERS):
  """Starts all queue runners collected in the graph.

  This is a companion method to `add_queue_runner()`. It just starts
  threads for all queue runners collected in the graph. It returns
  the list of all threads.

  Args:
    sess: `Session` used to run the queue ops. Defaults to the
      default session.
    coord: Optional `Coordinator` for coordinating the started threads.
    daemon: Whether the threads should be marked as `daemons`, meaning
      they don't block program exit.
    start: Set to `False` to only create the threads, not start them.
    collection: A `GraphKey` specifying the graph collection to
      get the queue runners from. Defaults to `GraphKeys.QUEUE_RUNNERS`.

  Returns:
    A list of threads.

  Raises:
    RuntimeError: If called with eager execution enabled.
    ValueError: If `sess` is None and there isn't any default session.
    TypeError: If `sess` is not a `tf.Session` object.

  @compatibility(eager)
  Not compatible with eager execution. To ingest data under eager execution,
  use the `tf.data` API instead.
  @end_compatibility
  """
  if context.in_eager_mode():
    raise RuntimeError("Queues are not compatible with eager execution.")
  if sess is None:
    sess = ops.get_default_session()
    if not sess:
      raise ValueError("Cannot start queue runners: No default session is "
                       "registered. Use `with sess.as_default()` or pass an "
                       "explicit session to tf.start_queue_runners(sess=sess)")
  if not isinstance(sess, session.SessionInterface):
    # Following check is due to backward compatibility. (b/62061352)
    if sess.__class__.__name__ in [
        "MonitoredSession", "SingularMonitoredSession"]:
      return []
    raise TypeError("sess must be a `tf.Session` object. "
                    "Given class: {}".format(sess.__class__))
  with sess.graph.as_default():
    threads = []
    for qr in ops.get_collection(collection):
      threads.extend(qr.create_threads(sess, coord=coord, daemon=daemon,
                                       start=start))
  return threads

Developer: AbhinavJain13, Project: tensorflow, Lines of code: 57, Source file: queue_runner_impl.py
Example 17: save

def save(self, file_prefix, session=None):
  assign_op = self.save_counter.assign_add(1)
  if context.in_graph_mode():
    if session is None:
      session = ops.get_default_session()
    session.run(assign_op)
  return self._saver.save(
      file_prefix=file_prefix,
      checkpoint_number=self.save_counter,
      session=session)

Developer: hhu-luqi, Project: tensorflow, Lines of code: 10, Source file: checkpointable_utils_test.py
Example 18: save

def save(self, session=None, checkpoint_number=None):
  """Creates a new checkpoint and manages it.

  Args:
    session: The session to evaluate variables in. Ignored when executing
      eagerly. If not provided when graph building, the default session is
      used.
    checkpoint_number: An optional integer, or an integer-dtype `Variable` or
      `Tensor`, used to number the checkpoint. If `None` (default),
      checkpoints are numbered using `checkpoint.save_counter`. Even if
      `checkpoint_number` is provided, `save_counter` is still incremented. A
      user-provided `checkpoint_number` is not incremented even if it is a
      `Variable`.

  Returns:
    The path to the new checkpoint. It is also recorded in the `checkpoints`
    and `latest_checkpoint` properties.
  """
  # Save counter logic duplicated from tf.train.Checkpoint, soon to diverge
  # slightly with a custom numbering option.
  if context.executing_eagerly():
    save_counter = self._checkpoint.save_counter
    save_counter.assign_add(1)
  else:
    if session is None:
      session = ops.get_default_session()

    def _initializing_creator(next_creator, **kwargs):
      """Initialize the save counter if it has been newly created."""
      v = next_creator(**kwargs)
      session.run(v.initializer)
      return v

    with variable_scope.variable_creator_scope(_initializing_creator):
      save_counter = self._checkpoint.save_counter
    if self._save_counter_assign is None:
      self._save_counter_assign = save_counter.assign_add(1, read_value=False)
    session.run(self._save_counter_assign)
  if checkpoint_number is None:
    checkpoint_number = save_counter
  if not isinstance(checkpoint_number, compat.integral_types):
    checkpoint_number = training_util.global_step(
        sess=session, global_step_tensor=checkpoint_number)
  prefix = "%s-%d" % (self._prefix, checkpoint_number)
  save_path = self._checkpoint.write(prefix)
  timestamp = time.time()
  # If this is an overwritten checkpoint we were previously tracking, delete
  # and reinsert it to make sure it goes to the end of the queue.
  if save_path in self._maybe_delete:
    del self._maybe_delete[save_path]
  self._maybe_delete[save_path] = timestamp
  self._latest_checkpoint = save_path
  self._sweep()
  self._record_state()
  return save_path

Developer: AnishShah, Project: tensorflow, Lines of code: 55, Source file: checkpoint_management.py
Example 19: _compute_theoretical_jacobian

def _compute_theoretical_jacobian(x, x_shape, x_data, dy, dy_shape, dx):
  """Computes the theoretical Jacobian for dy/dx.

  Computes the theoretical Jacobian using the ops generated by
  compute_gradient().

  Args:
    x: the tensor "x".
    x_shape: the dimensions of x as a tuple or an array of ints.
    x_data: a numpy array as the input data for x.
    dy: the tensor "dy".
    dy_shape: the dimensions of dy as a tuple or an array of ints.
    dx: Tensor or IndexedSlices representing dx.

  Returns:
    A 2-d numpy array representing the Jacobian for dy/dx. It has "x_size" rows
    and "dy_size" columns where "x_size" is the number of elements in x and
    "dy_size" is the number of elements in dy.
  """
  # Complex vectors are treated as vectors of twice as many reals.
  if x.dtype.is_complex:
    x_shape = tuple(x_shape) + (2,)
  dy_factor = 2 if dy.dtype.is_complex else 1

  # To compute the jacobian, we treat x and y as one-dimensional vectors.
  x_size = _product(x_shape)
  x_val_size = _product(x_shape[1:])  # This is used for sparse gradients
  dy_size = _product(dy_shape) * dy_factor

  jacobian = np.zeros((x_size, dy_size),
                      dtype=x.dtype.real_dtype.as_numpy_dtype)

  # For each entry of dy, we set it to 1 and everything else to 0, then
  # compute the backprop -- this gives us one column of the Jacobian matrix.
  dy_data = np.zeros(dy_shape, dtype=dy.dtype.as_numpy_dtype)
  dy_data_flat = dy_data.ravel().view(dy.dtype.real_dtype.as_numpy_dtype)
  sess = ops.get_default_session()
  for col in range(dy_size):
    dy_data_flat[col] = 1
    if isinstance(dx, ops.IndexedSlices):
      backprop_indices, backprop_values = sess.run(
          [dx.indices, dx.values], feed_dict={x: x_data, dy: dy_data})
      for i, v in zip(backprop_indices, backprop_values):
        r_begin = i * x_val_size
        r_end = r_begin + x_val_size
        jacobian[r_begin:r_end, col] += v.flat
    else:
      assert isinstance(dx, ops.Tensor), "dx = " + str(dx)
      backprop = sess.run(dx, feed_dict={x: x_data, dy: dy_data})
      jacobian[:, col] = backprop.ravel().view(jacobian.dtype)
    dy_data_flat[col] = 0

  logging.vlog(1, "Theoretical Jacobian =\n%s", jacobian)
  return jacobian

Developer: 13683116633, Project: tensorflow, Lines of code: 54, Source file: gradient_checker.py
Example 20: evaluate

def evaluate(self, tensors):
  """Evaluates tensors and returns numpy values.

  Args:
    tensors: A Tensor or a nested list/tuple of Tensors.

  Returns:
    tensors numpy values.
  """
  sess = ops.get_default_session() or self.cached_session()
  return sess.run(tensors)

Developer: Wajih-O, Project: tensorflow, Lines of code: 11, Source file: benchmark.py
Note: The tensorflow.python.framework.ops.get_default_session examples in this article were compiled by 纯净天空 from source-code and documentation hosting platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors, and distribution and use should follow the License of the corresponding project. Do not reproduce without permission.