This page collects typical usage examples of the Python function tensorflow.contrib.learn.python.learn.utils.checkpoints.load_variable. If you have been wondering exactly what load_variable does and how to use it, the curated code examples here may help.
Twenty code examples of load_variable are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
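Before the examples, here is a minimal sketch of the two entry points this page revolves around, load_variable and its companion list_variables. The checkpoint directory and the "global_step" variable name below are hypothetical, not taken from the examples; both functions read the checkpoint directly and return plain numpy data, so no session is required.

# Minimal sketch, assuming /tmp/model_dir holds a checkpoint written by
# tf.train.Saver. The path and variable name are illustrative only.
from tensorflow.contrib.learn.python.learn.utils import checkpoints

checkpoint_dir = "/tmp/model_dir"

# List (name, shape) pairs for every variable stored in the checkpoint.
for name, shape in checkpoints.list_variables(checkpoint_dir):
    print(name, shape)

# Load a single variable's value as a numpy array.
step = checkpoints.load_variable(checkpoint_dir, "global_step")
print("global_step =", step)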
Example 1: test_train_skip_train_if_max_step_already_saved
  def test_train_skip_train_if_max_step_already_saved(self):
    with tf.Graph().as_default() as g, self.test_session(g):
      with tf.control_dependencies(self._build_inference_graph()):
        train_op = tf.assign_add(tf.contrib.framework.get_global_step(), 1)
      learn.graph_actions._monitored_train(  # pylint: disable=protected-access
          g,
          output_dir=self._output_dir,
          train_op=train_op,
          loss_op=tf.constant(2.0),
          max_steps=10)
      step = checkpoints.load_variable(
          self._output_dir, tf.contrib.framework.get_global_step().name)
      self.assertEqual(10, step)

    with tf.Graph().as_default() as g, self.test_session(g):
      with tf.control_dependencies(self._build_inference_graph()):
        train_op = tf.assign_add(tf.contrib.framework.get_global_step(), 1)
      learn.graph_actions._monitored_train(  # pylint: disable=protected-access
          g,
          output_dir=self._output_dir,
          train_op=train_op,
          loss_op=tf.constant(2.0),
          max_steps=10)
      step = checkpoints.load_variable(
          self._output_dir, tf.contrib.framework.get_global_step().name)
      self.assertEqual(10, step)
Author: MostafaGazar, Project: tensorflow, Lines of code: 26, Source file: graph_actions_test.py
Example 2: weights_
  def weights_(self):
    hiddenlayer_weights = [checkpoints.load_variable(
        self._model_dir, name=("dnn/hiddenlayer_%d/weights" % i))
                           for i, _ in enumerate(self._hidden_units)]
    logits_weights = [checkpoints.load_variable(
        self._model_dir, name="dnn/logits/weights")]
    return hiddenlayer_weights + logits_weights
Author: MrCrumpets, Project: tensorflow, Lines of code: 7, Source file: dnn.py
Example 3: bias_
  def bias_(self):
    hiddenlayer_bias = [
        checkpoints.load_variable(self._model_dir,
                                  name=("dnn/hiddenlayer_%d/biases" % i))
        for i, _ in enumerate(self._hidden_units)
    ]
    logits_bias = [checkpoints.load_variable(self._model_dir,
                                             name="dnn/logits/biases")]
    centered_bias = [checkpoints.load_variable(self._model_dir,
                                               name=_CENTERED_BIAS_WEIGHT)]
    return hiddenlayer_bias + logits_bias + centered_bias
Author: pronobis, Project: tensorflow, Lines of code: 8, Source file: dnn.py
Example 4: testGetTensor
  def testGetTensor(self):
    checkpoint_dir = self.get_temp_dir()
    with self.test_session() as session:
      v1, v2, v3, v4 = _create_checkpoints(session, checkpoint_dir)
    self.assertAllEqual(checkpoints.load_variable(checkpoint_dir, "var1"), v1)
    self.assertAllEqual(checkpoints.load_variable(checkpoint_dir, "var2"), v2)
    self.assertAllEqual(checkpoints.load_variable(checkpoint_dir, "var3"), v3)
    self.assertAllEqual(
        checkpoints.load_variable(checkpoint_dir, "useful_scope/var4"), v4)
Author: 0-T-0, Project: tensorflow, Lines of code: 9, Source file: checkpoints_test.py
Example 5: get_bias
  def get_bias(self, model_dir):
    """Returns the bias of the model.

    Args:
      model_dir: Directory where model parameters, graph, etc. are saved.

    Returns:
      The bias weights created by this model.
    """
    return [checkpoints.load_variable(
        model_dir, name=(self._scope + "/hiddenlayer_%d/biases" % i))
            for i, _ in enumerate(self._hidden_units)] + [
                checkpoints.load_variable(
                    model_dir, name=(self._scope + "/logits/biases"))]
Author: 10imaging, Project: tensorflow, Lines of code: 14, Source file: composable_model.py
Example 6: print_tensors_in_checkpoint_file
def print_tensors_in_checkpoint_file(file_name, tensor_name):
  """Prints tensors in a checkpoint file.

  If no `tensor_name` is provided, prints the tensor names and shapes
  in the checkpoint file.

  If `tensor_name` is provided, prints the content of the tensor.

  Args:
    file_name: Name of the checkpoint file.
    tensor_name: Name of the tensor in the checkpoint file to print.
  """
  try:
    if not tensor_name:
      variables = checkpoints.list_variables(file_name)
      for name, shape in variables:
        print("%s\t%s" % (name, str(shape)))
    else:
      print("tensor_name: ", tensor_name)
      print(checkpoints.load_variable(file_name, tensor_name))
  except Exception as e:  # pylint: disable=broad-except
    print(str(e))
    if "corrupted compressed block contents" in str(e):
      print("It's likely that your checkpoint file has been compressed "
            "with SNAPPY.")
Author: Baaaaam, Project: tensorflow, Lines of code: 25, Source file: inspect_checkpoint.py
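A hedged usage sketch for the function above: an empty tensor_name lists every tensor's name and shape, while a non-empty one prints that tensor's value. The checkpoint path is hypothetical and must point at a real checkpoint on disk.

# Hypothetical invocations of print_tensors_in_checkpoint_file; the
# checkpoint path and the "var1" name are illustrative only.
print_tensors_in_checkpoint_file("/tmp/model_dir/model.ckpt-10", "")      # list all tensors
print_tensors_in_checkpoint_file("/tmp/model_dir/model.ckpt-10", "var1")  # print one tensor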
Example 7: weights_
  def weights_(self):
    values = {}
    optimizer_regex = r".*/" + self._optimizer.get_name() + r"(_\d)?$"
    for name, _ in checkpoints.list_variables(self._model_dir):
      if (name.startswith("linear/") and name != "linear/bias_weight" and
          not re.match(optimizer_regex, name)):
        values[name] = checkpoints.load_variable(self._model_dir, name)
    if len(values) == 1:
      return values[list(values.keys())[0]]
    return values
Author: flyingbirdman, Project: tensorflow, Lines of code: 9, Source file: linear.py
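The regex above is worth unpacking: optimizer slot variables are stored alongside the weights they belong to, under names ending in the optimizer's name plus an optional _&lt;digit&gt; suffix, so weights_ filters them out and keeps only the model weights. A self-contained sketch with made-up variable names (only the standard-library re module is needed):

import re

# Hypothetical checkpoint variable names mimicking a linear model
# trained with the Ftrl optimizer.
names = [
    "linear/age/weight",         # model weight -> kept
    "linear/bias_weight",        # bias -> excluded by the explicit name check
    "linear/age/weight/Ftrl",    # optimizer slot -> excluded by the regex
    "linear/age/weight/Ftrl_1",  # second optimizer slot -> also excluded
]
optimizer_regex = r".*/" + "Ftrl" + r"(_\d)?$"
kept = [name for name in names
        if name.startswith("linear/") and name != "linear/bias_weight"
        and not re.match(optimizer_regex, name)]
print(kept)  # ['linear/age/weight']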
Example 8: test_train_max_steps_is_not_incremental
  def test_train_max_steps_is_not_incremental(self):
    with tf.Graph().as_default() as g, self.test_session(g):
      with tf.control_dependencies(self._build_inference_graph()):
        train_op = tf.assign_add(tf.contrib.framework.get_global_step(), 1)
      learn.graph_actions.train(g, output_dir=self._output_dir,
                                train_op=train_op, loss_op=tf.constant(2.0),
                                max_steps=10)
      step = checkpoints.load_variable(
          self._output_dir, tf.contrib.framework.get_global_step().name)
      self.assertEqual(10, step)

    with tf.Graph().as_default() as g, self.test_session(g):
      with tf.control_dependencies(self._build_inference_graph()):
        train_op = tf.assign_add(tf.contrib.framework.get_global_step(), 1)
      learn.graph_actions.train(g, output_dir=self._output_dir,
                                train_op=train_op, loss_op=tf.constant(2.0),
                                max_steps=15)
      step = checkpoints.load_variable(
          self._output_dir, tf.contrib.framework.get_global_step().name)
      self.assertEqual(15, step)
Author: MostafaGazar, Project: tensorflow, Lines of code: 20, Source file: graph_actions_test.py
Example 9: get_variable_value
  def get_variable_value(self, name):
    """Returns value of the variable given by name.

    Args:
      name: string, name of the tensor.

    Returns:
      Numpy array - value of the tensor.
    """
    if name.endswith(':0'):
      name = name[:-2]
    return checkpoints.load_variable(self.model_dir, name)
Author: MMMdata, Project: tensorflow, Lines of code: 12, Source file: estimator.py
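Why strip the ':0'? tf.Tensor.name carries an output index ("w:0"), while checkpoint keys are bare variable names, so the method normalizes the name before the lookup. A tiny standalone sketch of that normalization (the name is made up for illustration):

# Normalize a graph tensor name to its checkpoint key, as the method does.
name = "dnn/logits/weights:0"  # hypothetical value of some tensor.name
if name.endswith(":0"):
    name = name[:-2]
print(name)  # -> dnn/logits/weights, the key stored in the checkpoint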
Example 10: get_weights
  def get_weights(self, model_dir):
    """Returns weights per feature of the linear part.

    Args:
      model_dir: Directory where model parameters, graph, etc. are saved.

    Returns:
      The weights created by this model (without the optimizer weights).
    """
    all_variables = [name for name, _ in checkpoints.list_variables(model_dir)]
    values = {}
    optimizer_regex = r".*/" + self._get_optimizer().get_name() + r"(_\d)?$"
    for name in all_variables:
      if (name.startswith(self._scope + "/") and
          name != self._scope + "/bias_weight" and
          not re.match(optimizer_regex, name)):
        values[name] = checkpoints.load_variable(model_dir, name)
    if len(values) == 1:
      return values[list(values.keys())[0]]
    return values
Author: 10imaging, Project: tensorflow, Lines of code: 20, Source file: composable_model.py
Example 11: get_variable_value
  def get_variable_value(self, name):
    return checkpoints.load_variable(self.model_dir, name)
Author: KalraA, Project: tensorflow, Lines of code: 2, Source file: linear.py
Example 12: bias_
  def bias_(self):
    return checkpoints.load_variable(self._model_dir,
                                     name="linear/bias_weight")
Author: KalraA, Project: tensorflow, Lines of code: 3, Source file: linear.py
Example 13: _train_internal
def _train_internal(graph,
                    output_dir,
                    train_op,
                    loss_op,
                    global_step_tensor,
                    init_op,
                    init_feed_dict,
                    init_fn,
                    log_every_steps,
                    supervisor_is_chief,
                    supervisor_master,
                    supervisor_save_model_secs,
                    keep_checkpoint_max,
                    supervisor_save_summaries_steps,
                    feed_fn,
                    steps,
                    fail_on_nan_loss,
                    monitors,
                    max_steps):
  """See train."""
  if (steps is not None) and (max_steps is not None):
    raise ValueError('Can not provide both steps and max_steps.')
  if not output_dir:
    raise ValueError('Output directory should be non-empty %s.' % output_dir)
  if train_op is None:
    raise ValueError('Missing train_op.')
  if loss_op is None:
    raise ValueError('Missing loss_op.')

  with graph.as_default():
    global_step_tensor = contrib_variables.assert_or_get_global_step(
        graph, global_step_tensor)
    if global_step_tensor is None:
      raise ValueError('No "global_step" was provided or found in the graph.')

    # Get current step.
    try:
      start_step = checkpoints.load_variable(
          output_dir, global_step_tensor.name)
    except (errors.NotFoundError, ValueError):
      start_step = 0

    summary_writer = (get_summary_writer(output_dir)
                      if supervisor_is_chief else None)

    # Add default chief monitors if none were provided.
    if not monitors:
      monitors = monitors_lib.get_default_monitors(
          loss_op=loss_op,
          summary_op=logging_ops.get_summary_op(),
          save_summary_steps=supervisor_save_summaries_steps,
          summary_writer=summary_writer) if supervisor_is_chief else []

    # TODO(ipolosukhin): Replace all functionality of Supervisor
    # with Chief-Exclusive Monitors.
    if not supervisor_is_chief:
      # Prune list of monitor to the ones runnable on all workers.
      monitors = [monitor for monitor in monitors
                  if monitor.run_on_all_workers]

    if max_steps is None:
      max_steps = (start_step + steps) if steps else None

    # Start monitors, can create graph parts.
    for monitor in monitors:
      monitor.begin(max_steps=max_steps)

    supervisor = tf_supervisor.Supervisor(
        graph,
        init_op=init_op or tf_supervisor.Supervisor.USE_DEFAULT,
        init_feed_dict=init_feed_dict,
        is_chief=supervisor_is_chief,
        logdir=output_dir,
        saver=_make_saver(graph, keep_checkpoint_max),
        global_step=global_step_tensor,
        summary_op=None,
        summary_writer=summary_writer,
        save_model_secs=supervisor_save_model_secs,
        init_fn=init_fn)
    session = supervisor.PrepareSession(master=supervisor_master,
                                        start_standard_services=True)
    supervisor.StartQueueRunners(session)

    with session:
      get_current_step = lambda: session.run(global_step_tensor)

      start_step = get_current_step()
      last_step = start_step
      last_log_step = start_step
      loss_value = None
      logging.info('Training steps [%d,%s)', last_step, 'inf'
                   if max_steps is None else str(max_steps))

      excinfo = None
      try:
        while not supervisor.ShouldStop() and (
            (max_steps is None) or (last_step < max_steps)):
          start_time = time.time()
          feed_dict = feed_fn() if feed_fn is not None else None

          outputs, should_stop = _run_with_monitors(
              session, last_step + 1, [train_op, loss_op], feed_dict, monitors)
# ......... (remainder of this function omitted) .........
Author: 2020zyc, Project: tensorflow, Lines of code: 101, Source file: graph_actions.py
Example 14: clusters
  def clusters(self):
    """Returns cluster centers."""
    return checkpoints.load_variable(self.model_dir, self.CLUSTERS)
Author: 31H0B1eV, Project: tensorflow, Lines of code: 3, Source file: kmeans.py
Example 15: train
def train(graph,
          output_dir,
          train_op,
          loss_op,
          global_step_tensor=None,
          init_op=None,
          init_feed_dict=None,
          init_fn=None,
          log_every_steps=10,
          supervisor_is_chief=True,
          supervisor_master='',
          supervisor_save_model_secs=600,
          supervisor_save_summaries_steps=100,
          feed_fn=None,
          steps=None,
          fail_on_nan_loss=True,
          monitors=None):
  """Train a model.

  Given `graph`, a directory to write outputs to (`output_dir`), and some ops,
  run a training loop. The given `train_op` performs one step of training on
  the model; it is expected to increment the `global_step_tensor`, a scalar
  integer tensor counting training steps. The `loss_op` represents the
  objective function of the training. This function uses `Supervisor` to
  initialize the graph (from a checkpoint if one is available in
  `output_dir`), write summaries defined in the graph, and write regular
  checkpoints as defined by `supervisor_save_model_secs`.

  Training continues until `global_step_tensor` evaluates to `max_steps`, or,
  if `fail_on_nan_loss`, until `loss_op` evaluates to `NaN`. In that case the
  program is terminated with exit code 1.

  Args:
    graph: A graph to train. It is expected that this graph is not in use
      elsewhere.
    output_dir: A directory to write outputs to.
    train_op: An op that performs one training step when run.
    loss_op: A scalar loss tensor.
    global_step_tensor: A tensor representing the global step. If none is
      given, one is extracted from the graph using the same logic as in
      `Supervisor`.
    init_op: An op that initializes the graph. If `None`, use `Supervisor`'s
      default.
    init_feed_dict: A dictionary that maps `Tensor` objects to feed values.
      This feed dictionary will be used when `init_op` is evaluated.
    init_fn: Optional callable passed to Supervisor to initialize the model.
    log_every_steps: Output logs regularly. The logs contain timing data and
      the current loss.
    supervisor_is_chief: Whether the current process is the chief supervisor
      in charge of restoring the model and running standard services.
    supervisor_master: The master string to use when preparing the session.
    supervisor_save_model_secs: Save a checkpoint every
      `supervisor_save_model_secs` seconds when training.
    supervisor_save_summaries_steps: Save summaries every
      `supervisor_save_summaries_steps` steps when training.
    feed_fn: A function that is called every iteration to produce a
      `feed_dict` passed to `session.run` calls. Optional.
    steps: Trains for this many steps (e.g. current global step + `steps`).
    fail_on_nan_loss: If true, raise `NanLossDuringTrainingError` if `loss_op`
      evaluates to `NaN`. If false, continue training as if nothing happened.
    monitors: List of `BaseMonitor` subclass instances. Used for callbacks
      inside the training loop.

  Returns:
    The final loss value.

  Raises:
    ValueError: If `global_step_tensor` is not provided. See
      `tf.contrib.framework.get_global_step` for how we look it up if not
      provided explicitly.
    NanLossDuringTrainingError: If `fail_on_nan_loss` is `True`, and loss ever
      evaluates to `NaN`.
  """
  if not output_dir:
    raise ValueError('Output directory should be non-empty.')

  with graph.as_default():
    global_step_tensor = contrib_variables.assert_or_get_global_step(
        graph, global_step_tensor)
    if global_step_tensor is None:
      raise ValueError('No "global_step" was provided or found in the graph.')

    # Get current step.
    try:
      start_step = checkpoints.load_variable(
          output_dir, global_step_tensor.name)
    except (errors.NotFoundError, ValueError):
      start_step = 0

    summary_writer = (get_summary_writer(output_dir)
                      if supervisor_is_chief else None)

    # TODO(ipolosukhin): Replace all functionality of Supervisor with Monitors.
    if not supervisor_is_chief:
      # monitors should run only on the chief.
      monitors = []
    elif not monitors:
      monitors = monitors_lib.get_default_monitors(
          loss_op=loss_op,
          summary_op=logging_ops.get_summary_op(),
          save_summary_steps=supervisor_save_summaries_steps,
# ......... (remainder of this function omitted) .........
Author: carloscampo5200, Project: tensorflow, Lines of code: 101, Source file: graph_actions.py
Example 16: _supervised_train
# ......... (beginning of this function omitted) .........
    supervisor_save_summaries_steps: Save summaries every
      `supervisor_save_summaries_steps` steps when training.
    feed_fn: A function that is called every iteration to produce a
      `feed_dict` passed to `session.run` calls. Optional.
    steps: Trains for this many steps (e.g. current global step + `steps`).
    fail_on_nan_loss: If true, raise `NanLossDuringTrainingError` if `loss_op`
      evaluates to `NaN`. If false, continue training as if nothing happened.
    monitors: List of `BaseMonitor` subclass instances. Used for callbacks
      inside the training loop.
    max_steps: Number of total steps for which to train the model. If `None`,
      train forever. Two calls of fit(steps=100) mean 200 training iterations.
      On the other hand, two calls of fit(max_steps=100) mean the second call
      does no iterations, since the first call already did all 100 steps.

  Returns:
    The final loss value.

  Raises:
    ValueError: If `output_dir`, `train_op`, `loss_op`, or
      `global_step_tensor` is not provided. See
      `tf.contrib.framework.get_global_step` for how we look up the latter if
      not provided explicitly.
    NanLossDuringTrainingError: If `fail_on_nan_loss` is `True`, and loss ever
      evaluates to `NaN`.
    ValueError: If both `steps` and `max_steps` are not `None`.
  """
  if (steps is not None) and (max_steps is not None):
    raise ValueError('Can not provide both steps and max_steps.')
  if not output_dir:
    raise ValueError('Output directory should be non-empty %s.' % output_dir)
  if train_op is None:
    raise ValueError('Missing train_op.')
  if loss_op is None:
    raise ValueError('Missing loss_op.')
  if monitors is None:
    monitors = []
  if not isinstance(monitors, list):
    raise ValueError('Monitors should be a list.')

  with graph.as_default():
    global_step_tensor = contrib_variables.assert_or_get_global_step(
        graph, global_step_tensor)
  if global_step_tensor is None:
    raise ValueError('No "global_step" was provided or found in the graph.')

  if max_steps is not None:
    try:
      start_step = checkpoints.load_variable(output_dir,
                                             global_step_tensor.name)
      if max_steps <= start_step:
        logging.info('Skipping training since max_steps has already saved.')
        return None
    except:  # pylint: disable=bare-except
      pass

  with graph.as_default():
    # See question about adding the summary writer to the scaffold.
    if supervisor_is_chief:
      summary_writer = summary_writer_cache.SummaryWriterCache.get(output_dir)
      monitors.extend([
          monitors_lib.StepCounter(summary_writer=summary_writer),
          monitors_lib.NanLoss(loss_op,
                               fail_on_nan_loss=fail_on_nan_loss),
          monitors_lib.PrintTensor({'loss': loss_op.name},
                                   every_n=log_every_steps),
      ])

    # Finalize the graph and add savers.
    # TODO(ispir): remove keep_checkpoint_max from Scaffold interface
    scaffold = supervised_session.Scaffold(
        global_step_tensor=global_step_tensor,
        init_op=init_op,
        init_feed_dict=init_feed_dict,
        init_fn=init_fn,
        keep_checkpoint_max=keep_checkpoint_max)
    if supervisor_is_chief:
      if scaffold.summary_op is not None:
        monitors.append(monitors_lib.SummarySaver(
            scaffold.summary_op,
            save_steps=supervisor_save_summaries_steps,
            summary_writer=summary_writer))
      if supervisor_save_model_steps > 0:
        monitors.append(
            monitors_lib.CheckpointSaver(supervisor_save_model_steps,
                                         scaffold.saver, output_dir))

    if steps is not None or max_steps is not None:
      monitors.append(monitors_lib.StopAtStep(steps, max_steps))
    if not supervisor_is_chief:
      # Prune the list of monitors to the ones runnable on all workers.
      monitors = [monitor for monitor in monitors
                  if monitor.run_on_all_workers]

    with supervised_session.SupervisedSession(supervisor_master,
                                              is_chief=supervisor_is_chief,
                                              checkpoint_dir=output_dir,
                                              monitors=monitors,
                                              scaffold=scaffold) as super_sess:
      loss = None
      while not super_sess.should_stop():
        _, loss = super_sess.run([train_op, loss_op],
                                 feed_fn() if feed_fn else None)
      return loss
Author: 2020zyc, Project: tensorflow, Lines of code: 101, Source file: graph_actions.py
Example 17: testNoTensor
  def testNoTensor(self):
    checkpoint_dir = self.get_temp_dir()
    with self.test_session() as session:
      _, _, _, _ = _create_checkpoints(session, checkpoint_dir)
    with self.assertRaises(tf.errors.OpError):
      self.assertAllEqual(
          checkpoints.load_variable(checkpoint_dir, "var5"), [])
Author: 0-T-0, Project: tensorflow, Lines of code: 6, Source file: checkpoints_test.py
Example 18: clusters
  def clusters(self):
    """Returns cluster centers."""
    clusters = checkpoints.load_variable(
        self.model_dir, gmm_ops.GmmAlgorithm.CLUSTERS_VARIABLE)
    return np.squeeze(clusters, 1)
Author: AdamPalmar, Project: tensorflow, Lines of code: 5, Source file: gmm.py
Example 19: covariances
  def covariances(self):
    """Returns the covariances."""
    return checkpoints.load_variable(
        self.model_dir,
        gmm_ops.GmmAlgorithm.CLUSTERS_COVS_VARIABLE)
Author: AdamPalmar, Project: tensorflow, Lines of code: 5, Source file: gmm.py
Example 20: _monitored_train
# ......... (beginning of this function omitted) .........
      does no iterations, since the first call already did all 100 steps.

  Returns:
    The final loss value.

  Raises:
    ValueError: If `output_dir`, `train_op`, `loss_op`, or
      `global_step_tensor` is not provided. See
      `tf.contrib.framework.get_global_step` for how we look up the latter if
      not provided explicitly.
    NanLossDuringTrainingError: If `fail_on_nan_loss` is `True`, and loss ever
      evaluates to `NaN`.
    ValueError: If both `steps` and `max_steps` are not `None`.
  """
  if (steps is not None) and (max_steps is not None):
    raise ValueError('Can not provide both steps and max_steps.')
  if not output_dir:
    raise ValueError('Output directory should be non-empty %s.' % output_dir)
  if train_op is None:
    raise ValueError('Missing train_op.')
  if loss_op is None:
    raise ValueError('Missing loss_op.')
  if hooks is None:
    hooks = []
  if not isinstance(hooks, list):
    raise ValueError('Hooks should be a list.')

  with graph.as_default():
    global_step_tensor = contrib_variables.assert_or_get_global_step(
        graph, global_step_tensor)
  if global_step_tensor is None:
    raise ValueError('No "global_step" was provided or found in the graph.')

  if max_steps is not None:
    try:
      start_step = checkpoints.load_variable(output_dir,
                                             global_step_tensor.name)
      if max_steps <= start_step:
        logging.info('Skipping training since max_steps has already saved.')
        return None
    except:  # pylint: disable=bare-except
      pass

  # Adapted SessionRunHooks, such as ExportMonitor, depend on the
  # CheckpointSaverHook having executed before they execute. The `hooks`
  # param comprises deprecated monitor hooks (such as ExportMonitor), so
  # they are appended after the basic_session_run_hooks.
  all_hooks = []
  with graph.as_default():
    all_hooks.append(basic_session_run_hooks.NanTensorHook(
        loss_op, fail_on_nan_loss=fail_on_nan_loss))
    if log_every_steps > 0:
      all_hooks.append(basic_session_run_hooks.LoggingTensorHook({
          'loss': loss_op.name,
          'step': global_step_tensor.name
      }, every_n_iter=log_every_steps))

    def make_saver():
      return tf_saver.Saver(
          sharded=True, max_to_keep=keep_checkpoint_max, defer_build=True)

    scaffold = monitored_session.Scaffold(
        init_op=init_op,
        init_feed_dict=init_feed_dict,
        init_fn=init_fn,
        saver=monitored_session.Scaffold.get_or_default('saver',
                                                        ops.GraphKeys.SAVERS,
                                                        make_saver))
Author: MostafaGazar, Project: tensorflow, Lines of code: 67, Source file: graph_actions.py
Note: The tensorflow.contrib.learn.python.learn.utils.checkpoints.load_variable examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their respective developers, and copyright remains with the original authors. Consult each project's license before using or redistributing the code; do not republish without permission.