This article collects typical usage examples of the Python function tensorflow.python.keras.engine.distributed_training_utils.unwrap_values. If you are wondering what unwrap_values does and how to use it, the curated code examples below may help.
The sections below show 15 code examples of the unwrap_values function, sorted by popularity by default. You can upvote examples you like or find useful; your feedback helps the system recommend better Python examples.
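All of the examples follow the same pattern: a per-replica helper is run with the strategy's call_for_each_replica (or the older call_for_each_tower), which yields grouped per-device values; unwrap_values then flattens those grouped values into plain lists that a single K.function can consume, optionally reducing the per-replica losses into one tensor via with_loss_tensor=True. The minimal sketch below illustrates that pattern; it assumes an already compiled Keras model built under a strategy, and the helper name _per_replica_function is made up for illustration (the exact internal attributes, and whether the extended. prefix is needed, depend on the TensorFlow version).

# Minimal sketch of the shared pattern (not a standalone runnable script):
# `model`, `strategy`, `K` and `distributed_training_utils` are assumed to be
# the same objects used throughout the examples below; `_per_replica_function`
# is a hypothetical helper name.
def _per_replica_function(m):
  f = m._make_execution_function('train')
  return (f.inputs, f.outputs, f.updates_op, f.session_kwargs)

with strategy.scope():
  # Each returned element is a grouped per-replica value.
  (grouped_inputs, grouped_outputs, grouped_updates,
   grouped_session_args) = strategy.extended.call_for_each_replica(
       _per_replica_function, args=(model._grouped_model,))

  # unwrap_values flattens the grouped values into plain lists usable by a
  # single backend function; with_loss_tensor=True also reduces the
  # per-replica losses into one loss tensor.
  (all_inputs, all_outputs, all_updates,
   all_session_args) = distributed_training_utils.unwrap_values(
       strategy, grouped_inputs, grouped_outputs, grouped_updates,
       grouped_session_args, with_loss_tensor=True)

  train_fn = K.function(
      all_inputs, all_outputs,
      updates=all_updates,
      name='distributed_train_function',
      **all_session_args)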
Example 1: step_fn
def step_fn(ctx, inputs):
  """Clones the model and calls make_predict_function."""
  if model._compile_distribution:
    distributed_training_utils.clone_model_on_replicas(
        model, current_strategy, mode, inputs=inputs)
  else:
    distributed_training_utils._build_distributed_network(
        model, current_strategy, mode, inputs)

  (grouped_inputs, grouped_outputs, grouped_updates,
   grouped_session_args) = current_strategy.extended.call_for_each_replica(
       _per_device_predict_function,
       args=(distributed_training_utils.get_distributed_model(
           model, ModeKeys.PREDICT),))

  (all_inputs, all_outputs, all_updates,
   all_session_args) = distributed_training_utils.unwrap_values(
       current_strategy, grouped_inputs, grouped_outputs, grouped_updates,
       grouped_session_args)

  combined_fn = K.function(
      all_inputs, all_outputs,
      updates=all_updates,
      name='distributed_predict_function',
      **all_session_args)

  for label, output in zip(model.output_names, combined_fn.outputs):
    ctx.set_last_step_output(label, output)

  return combined_fn.updates_op
Author: ziky90, Project: tensorflow, Lines of code: 30, Source: training_distributed.py
Example 2: step_fn
def step_fn(ctx, inputs, targets):
  """Clones the model and calls make_train_function."""
  # TODO(priyag, sourabhbajaj): Should cache this keyed on input shapes.
  clone_model_on_towers(
      model,
      current_strategy,
      make_callback_model=True,
      inputs=inputs,
      targets=targets)

  (grouped_inputs, grouped_outputs, grouped_updates,
   grouped_session_args) = current_strategy.call_for_each_tower(
       _per_device_train_function, model._grouped_model)
  (all_inputs, all_outputs, all_updates,
   all_session_args) = distributed_training_utils.unwrap_values(
       current_strategy, grouped_inputs, grouped_outputs,
       grouped_updates, grouped_session_args, with_loss_tensor=True)
  combined_fn = K.Function(
      all_inputs, all_outputs,
      updates=all_updates,
      name='distributed_train_function',
      **all_session_args)

  # TODO(priyag, sourabhbajaj): Perhaps the aggregation type needs to be
  # something else for different outputs.
  out_labels = model.metrics_names or []
  for label, output in zip(out_labels, combined_fn.outputs):
    ctx.set_last_step_output(label, output,
                             aggregation=distribute_lib.get_loss_reduction())

  # TODO(priyag, sourabhbajaj): Ignoring these things from the combined_fn:
  # feed_dict, session kwargs, run options, run_metadata for now. These should
  # be handled appropriately
  return combined_fn.updates_op
Author: clsung, Project: tensorflow, Lines of code: 34, Source: training_distributed.py
Example 3: step_fn
def step_fn(ctx, *inputs):
  """Clones the model and calls make_predict_function."""
  # TODO(priyag, sourabhbajaj): The model gets cloned every time
  # fit/test/predict is called. We should look into caching this keyed on
  # input shapes.
  clone_model_on_replicas(
      model,
      current_strategy,
      make_callback_model=False,
      inputs=inputs,
      mode=_Mode.PREDICT)

  (grouped_inputs, grouped_outputs, grouped_updates,
   grouped_session_args) = current_strategy.call_for_each_replica(
       _per_device_predict_function, args=(model._grouped_model_predict,))

  (all_inputs, all_outputs, all_updates,
   all_session_args) = distributed_training_utils.unwrap_values(
       current_strategy, grouped_inputs, grouped_outputs, grouped_updates,
       grouped_session_args)

  combined_fn = K.function(
      all_inputs, all_outputs,
      updates=all_updates,
      name='distributed_predict_function',
      **all_session_args)

  for label, output in zip(model.output_names, combined_fn.outputs):
    ctx.set_last_step_output(label, output)

  return combined_fn.updates_op
Author: abhinav-upadhyay, Project: tensorflow, Lines of code: 32, Source: training_distributed.py
Example 4: _make_eager_execution_function
def _make_eager_execution_function(model, mode):
  """Makes function to run one step of distributed model eager execution."""
  strategy = model._distribution_strategy
  if not model._grouped_model:
    clone_model_on_replicas(
        model, strategy, make_callback_model=(mode == 'train'))

  def _per_device_function(model):
    f = model._make_execution_function(mode)
    return (f.inputs, f.outputs)

  # NOTE(priyag): Try creating a new FuncGraph within DS scope instead of using
  # the global one.
  with K.get_graph().as_default(), strategy.scope():
    # Create train ops on each of the devices when we call
    # `_per_device_fit_function`.
    (grouped_inputs, grouped_outputs) = strategy.call_for_each_replica(
        _per_device_function, args=(model._grouped_model,))

    # Unwrap all the per device values returned from `call_for_each_replica`.
    # Unwrapping per device values gives you a list of values that can be
    # used to construct a new train function that is composed of inputs/outputs
    # on all the devices over which the model is distributed.
    (all_inputs, all_outputs, _, _) = distributed_training_utils.unwrap_values(
        strategy,
        grouped_inputs,
        grouped_outputs,
        with_loss_tensor=(mode != 'predict'))

    return K.function(
        all_inputs,
        all_outputs,
        name='eager_distributed_{}_function'.format(mode))
Author: aeverall, Project: tensorflow, Lines of code: 33, Source: training_distributed.py
Example 5: _make_execution_function
def _make_execution_function(model, mode):
  """Makes function to run one step of distributed model execution."""
  if context.executing_eagerly():
    return _make_eager_execution_function(model, mode)

  strategy = model._distribution_strategy
  if not model._grouped_model:
    clone_model_on_replicas(
        model, strategy, make_callback_model=(mode == 'train'))

  def _per_device_function(model):
    f = model._make_execution_function(mode)
    return (f.inputs, f.outputs, f.updates_op, f.session_kwargs)

  with strategy.scope():
    # Create train ops on each of the devices when we call
    # `_per_device_fit_function`.
    (grouped_inputs, grouped_outputs, grouped_updates,
     grouped_session_args) = strategy.extended.call_for_each_replica(
         _per_device_function, args=(model._grouped_model,))

    if mode == 'train':
      # Initialize the variables in the replicated model. This is necessary for
      # multi-worker training because on some workers, initialization is not
      # needed. This method does initialization or waiting for initialization
      # according to the context object of distribute coordinator.
      distributed_training_utils.init_restore_or_wait_for_variables()

    # Unwrap all the per device values returned from `call_for_each_replica`.
    # Unwrapping per device values gives you a list of values that can be
    # used to construct a new train function that is composed of update ops on
    # all the devices over which the model is distributed.
    (all_inputs, all_outputs, all_updates,
     all_session_args) = distributed_training_utils.unwrap_values(
         strategy,
         grouped_inputs,
         grouped_outputs,
         grouped_updates,
         grouped_session_args,
         with_loss_tensor=(mode != 'predict'))

    return K.function(
        all_inputs,
        all_outputs,
        updates=all_updates,
        name='distributed_{}_function'.format(mode),
        **all_session_args)
Author: aeverall, Project: tensorflow, Lines of code: 47, Source: training_distributed.py
Example 6: step_fn
def step_fn(ctx, inputs):
  """A step fn that returns update ops."""
  if mode == ModeKeys.PREDICT:
    targets = None
  else:
    inputs, targets = inputs

  if model._compile_distribution:
    distributed_training_utils.clone_model_on_replicas(
        model, strategy, mode, inputs=inputs, targets=targets)
  else:
    distributed_training_utils._build_distributed_network(
        model, strategy, mode, inputs, targets)

  (grouped_inputs, grouped_outputs, grouped_updates,
   grouped_session_args) = strategy.extended.call_for_each_replica(
       _per_device_execution_function,
       args=(distributed_training_utils.get_distributed_model(model, mode),))
  (all_inputs, all_outputs, all_updates,
   all_session_args) = distributed_training_utils.unwrap_values(
       strategy, grouped_inputs, grouped_outputs, grouped_updates,
       grouped_session_args)
  combined_fn = K.function(
      all_inputs,
      all_outputs,
      updates=all_updates,
      name='distributed_' + str(mode) + '_function',
      **all_session_args)

  for label, output in zip(output_labels, combined_fn.outputs):
    if mode == ModeKeys.PREDICT:
      ctx.set_last_step_output(label, output)
    else:
      if label == 'loss':
        reduce_op = ds_reduce_util.ReduceOp.SUM
      else:
        # We reduce all other metrics using mean for now. This is temporary
        # workaround until new metrics are in place.
        reduce_op = ds_reduce_util.ReduceOp.MEAN
      ctx.set_last_step_output(label, output, reduce_op)

  # TODO(priyag, sourabhbajaj): Ignoring these things from the combined_fn:
  # feed_dict, session kwargs, run options, run_metadata for now. These should
  # be handled appropriately
  return combined_fn.updates_op
Author: kylin9872, Project: tensorflow, Lines of code: 45, Source: training_distributed.py
Example 7: step_fn
def step_fn(ctx, inputs):
  """Clones the model and calls make_fit_function."""
  # TODO(priyag, sourabhbajaj): The model gets cloned every time
  # fit/test/predict is called. We should look into caching this keyed on
  # input shapes.
  inputs, targets = inputs
  clone_model_on_replicas(
      model,
      current_strategy,
      make_callback_model=True,
      inputs=inputs,
      targets=targets,
      mode=_Mode.TRAIN)

  (grouped_inputs, grouped_outputs, grouped_updates,
   grouped_session_args) = current_strategy.call_for_each_replica(
       _per_device_fit_function, args=(model._grouped_model_train,))
  (all_inputs, all_outputs, all_updates,
   all_session_args) = distributed_training_utils.unwrap_values(
       current_strategy, grouped_inputs, grouped_outputs,
       grouped_updates, grouped_session_args)
  combined_fn = K.function(
      all_inputs,
      all_outputs,
      updates=all_updates,
      name='distributed_fit_function',
      **all_session_args)

  for label, output in zip(out_labels, combined_fn.outputs):
    if label == 'loss':
      reduce_op = distribute_lib.get_loss_reduction()
    else:
      # We reduce all other metrics using mean for now. This is temporary
      # workaround until new metrics are in place.
      reduce_op = ds_reduce_util.ReduceOp.MEAN
    ctx.set_last_step_output(label, output, reduce_op)

  # TODO(priyag, sourabhbajaj): Ignoring these things from the combined_fn:
  # feed_dict, session kwargs, run options, run_metadata for now. These should
  # be handled appropriately
  return combined_fn.updates_op
Author: zhaoyongke, Project: tensorflow, Lines of code: 41, Source: training_distributed.py
Example 8: step_fn
def step_fn(ctx, inputs):
  """Clones the model and calls make_fit_function."""
  inputs, targets = inputs
  if model._compile_distribution:
    distributed_training_utils.clone_model_on_replicas(
        model, current_strategy, ModeKeys.TRAIN, inputs=inputs,
        targets=targets)
  else:
    distributed_training_utils._build_distributed_network(
        model, current_strategy, ModeKeys.TRAIN, inputs, targets)

  (grouped_inputs, grouped_outputs, grouped_updates,
   grouped_session_args) = current_strategy.extended.call_for_each_replica(
       _per_device_fit_function, args=(model._distributed_model_train,))
  (all_inputs, all_outputs, all_updates,
   all_session_args) = distributed_training_utils.unwrap_values(
       current_strategy, grouped_inputs, grouped_outputs,
       grouped_updates, grouped_session_args)
  combined_fn = K.function(
      all_inputs,
      all_outputs,
      updates=all_updates,
      name='distributed_fit_function',
      **all_session_args)

  for label, output in zip(out_labels, combined_fn.outputs):
    if label == 'loss':
      reduce_op = distribute_lib.get_loss_reduction()
    else:
      # We reduce all other metrics using mean for now. This is temporary
      # workaround until new metrics are in place.
      reduce_op = ds_reduce_util.ReduceOp.MEAN
    ctx.set_last_step_output(label, output, reduce_op)

  # TODO(priyag, sourabhbajaj): Ignoring these things from the combined_fn:
  # feed_dict, session kwargs, run options, run_metadata for now. These should
  # be handled appropriately
  return combined_fn.updates_op
Author: terrytangyuan, Project: tensorflow, Lines of code: 38, Source: training_distributed.py
Example 9: step_fn
def step_fn(ctx, inputs):
  """Clones the model and calls make_eval_function."""
  inputs, targets = inputs
  if model._compile_distribution:
    distributed_training_utils.clone_model_on_replicas(
        model, current_strategy,
        make_callback_model=False, inputs=inputs,
        targets=targets, mode=distributed_training_utils.ModeKeys.TEST)
  else:
    distributed_training_utils._build_distributed_network(
        model, current_strategy, inputs, targets,
        mode=distributed_training_utils.ModeKeys.TEST)

  (grouped_inputs, grouped_outputs, grouped_updates,
   grouped_session_args) = current_strategy.extended.call_for_each_replica(
       _per_device_eval_function, args=(model._distributed_model_test,))

  (all_inputs, all_outputs, all_updates,
   all_session_args) = distributed_training_utils.unwrap_values(
       current_strategy, grouped_inputs, grouped_outputs, grouped_updates,
       grouped_session_args)

  combined_fn = K.function(
      all_inputs, all_outputs,
      updates=all_updates,
      name='distributed_test_function',
      **all_session_args)

  for label, output in zip(model.metrics_names, combined_fn.outputs):
    if label == 'loss':
      reduce_op = distribute_lib.get_loss_reduction()
    else:
      # We reduce all other metrics using mean for now. This is temporary
      # workaround until new metrics are in place.
      reduce_op = ds_reduce_util.ReduceOp.MEAN
    ctx.set_last_step_output(label, output, reduce_op)

  return combined_fn.updates_op
Author: Wajih-O, Project: tensorflow, Lines of code: 38, Source: training_distributed.py
Example 10: predict_loop
def predict_loop(model, iterator, verbose=0, steps=None):
  """Predict loop for predicting with DistributionStrategy.

  Arguments:
      model: Keras Model instance.
      iterator: Iterator for input data.
      verbose: Integer, Verbosity mode 0 or 1.
      steps: Total number of steps (batches of samples)
          before declaring `_predict_loop` finished.
          Ignored with the default value of `None`.

  Returns:
      Array of predictions (if the model has a single output)
      or list of arrays of predictions
      (if the model has multiple outputs).
  """
  current_strategy = model._distribution_strategy

  # TODO(priyag, sourabhbajaj): Remove this when the codepaths are merged.
  if current_strategy.__class__.__name__ == 'TPUStrategy':
    return _experimental_predict_loop(model, iterator, verbose, steps)

  if not model._grouped_model:
    clone_model_on_replicas(model, current_strategy)

  def _per_device_predict_function(model):
    model._make_predict_function()
    return (model.predict_function.inputs,
            model.predict_function.outputs,
            model.predict_function.updates_op,
            model.predict_function.session_kwargs)

  inputs, _, _ = _get_input_from_iterator(iterator, model)
  with current_strategy.scope():
    (grouped_inputs, grouped_outputs, grouped_updates,
     grouped_session_args) = current_strategy.call_for_each_replica(
         _per_device_predict_function, args=(model._grouped_model,))

    (all_inputs, all_outputs, all_updates,
     all_session_args) = distributed_training_utils.unwrap_values(
         current_strategy, grouped_inputs, grouped_outputs, grouped_updates,
         grouped_session_args)

    dataset_inputs = distributed_training_utils.flatten_perdevice_values(
        current_strategy, inputs)

  distributed_predict_function = K.function(
      all_inputs, all_outputs,
      updates=all_updates,
      name='distributed_predict_function',
      **all_session_args)

  if not isinstance(K.learning_phase(), int):
    ins = dataset_inputs + [0]
  else:
    ins = dataset_inputs

  if verbose == 1:
    progbar = Progbar(target=steps)

  # Copy the weights from the original model to each of the replicated models.
  orig_model_weights = model.get_weights()
  distributed_model = current_strategy.unwrap(model._grouped_model)[0]
  distributed_training_utils.set_weights(
      current_strategy, distributed_model, orig_model_weights)

  num_replicas = current_strategy.num_replicas_in_sync
  # Since we do not know how many samples we will see, we cannot
  # pre-allocate the returned Numpy arrays. Instead, we store one array per
  # batch seen and concatenate them upon returning.
  unconcatenated_outs = []

  assert steps is not None
  for step in range(steps):
    batch_outs = distributed_predict_function(ins)
    if not isinstance(batch_outs, list):
      batch_outs = [batch_outs]
    if step == 0:
      # batch_outs gives you the number of model outputs. In the distributed
      # case this will be number of model_outputs * num_replicas.
      for _ in range(len(model.outputs)):
        unconcatenated_outs.append([])
    for i in range(len(model.outputs)):
      nested_outs = batch_outs[i * num_replicas:
                               i * num_replicas + num_replicas]
      outs = nest.flatten(nested_outs)
      unconcatenated_outs[i].extend(outs)
    if verbose >= 1:
      progbar.update(step + 1)

  if len(unconcatenated_outs) == 1:
    return np.concatenate(unconcatenated_outs[0], axis=0)
  return [
      np.concatenate(unconcatenated_outs[i], axis=0)
      for i in range(len(unconcatenated_outs))
  ]
Author: abhinav-upadhyay, Project: tensorflow, Lines of code: 94, Source: training_distributed.py
Example 11: fit_loop
def fit_loop(
    model,
    iterator,
    epochs=100,
    verbose=1,
    callbacks=None,
    val_iterator=None,
    initial_epoch=0,
    steps_per_epoch=None,
    validation_steps=None):
  """Fit loop for training with DistributionStrategy.

  Arguments:
      model: Keras Model instance.
      iterator: Iterator for input data.
      epochs: Number of times to iterate over the data
      verbose: Integer, Verbosity mode, 0, 1 or 2
      callbacks: List of callbacks to be called during training
      val_iterator: Iterator for validation data.
      initial_epoch: Epoch at which to start training
          (useful for resuming a previous training run)
      steps_per_epoch: Total number of steps (batches of samples)
          before declaring one epoch finished and starting the
          next epoch. Ignored with the default value of `None`.
      validation_steps: Number of steps to run validation for
          (only if doing validation from data tensors).
          Ignored with the default value of `None`.

  Returns:
      `History` object.

  Raises:
      ValueError: in case of invalid arguments.
  """
  current_strategy = model._distribution_strategy

  # TODO(priyag, sourabhbajaj): Remove this when the codepaths are merged.
  if current_strategy.__class__.__name__ == 'TPUStrategy':
    return _experimental_fit_loop(
        model, iterator, epochs, verbose, callbacks, initial_epoch,
        steps_per_epoch, val_iterator, validation_steps)

  if not model._grouped_model:
    clone_model_on_replicas(model, current_strategy, make_callback_model=True)

  def _per_device_fit_function(model):
    model._make_fit_function()
    return (model._fit_function.inputs, model._fit_function.outputs,
            model._fit_function.updates_op, model._fit_function.session_kwargs)

  inputs, targets, sample_weights = _get_input_from_iterator(iterator, model)
  with current_strategy.scope():
    # Create train ops on each of the devices when we call
    # `_per_device_fit_function`.
    (grouped_inputs, grouped_outputs, grouped_updates,
     grouped_session_args) = current_strategy.call_for_each_replica(
         _per_device_fit_function, args=(model._grouped_model,))

    # Unwrap all the per device values returned from `call_for_each_replica`.
    # Unwrapping per device values gives you a list of values that can be
    # used to construct a new train function that is composed of update ops on
    # all the devices over which the model is distributed.
    (all_inputs, all_outputs, all_updates,
     all_session_args) = distributed_training_utils.unwrap_values(
         current_strategy, grouped_inputs, grouped_outputs,
         grouped_updates, grouped_session_args, with_loss_tensor=True)

    # Dataset inputs and targets are also per devices values that need to be
    # unwrapped.
    dataset_inputs = distributed_training_utils.flatten_perdevice_values(
        current_strategy, inputs)
    dataset_targets = distributed_training_utils.flatten_perdevice_values(
        current_strategy, targets)

  # Create a train function that is composed of all the parameters above.
  distributed_fit_function = K.function(
      all_inputs,
      all_outputs,
      updates=all_updates,
      name='distributed_fit_function',
      **all_session_args)

  # We need to set sample_weights to None since there are sample weight
  # placeholders that are created with default values.
  sample_weights = [None for _ in range(
      len(model.outputs) * current_strategy.num_replicas_in_sync)]

  if not isinstance(K.learning_phase(), int):
    ins = dataset_inputs + dataset_targets + sample_weights + [1]
  else:
    ins = dataset_inputs + dataset_targets

  do_validation = False
  if validation_steps:
    do_validation = True

  # Copy the weights from the original model to each of the replicated models.
  orig_model_weights = model.get_weights()
  distributed_model = current_strategy.unwrap(model._grouped_model)[0]
  distributed_training_utils.set_weights(
      current_strategy, distributed_model, orig_model_weights)
#......... part of the code is omitted here .........
Author: abhinav-upadhyay, Project: tensorflow, Lines of code: 101, Source: training_distributed.py
Example 12: test_loop
def test_loop(model, iterator, verbose=0, steps=None):
  """Test loop for evaluating with DistributionStrategy.

  Arguments:
      model: Keras Model instance.
      iterator: Iterator for input data.
      verbose: Integer, Verbosity mode 0 or 1.
      steps: Total number of steps (batches of samples)
          before declaring predictions finished.
          Ignored with the default value of `None`.

  Returns:
      Scalar loss (if the model has a single output and no metrics)
      or list of scalars (if the model has multiple outputs
      and/or metrics). The attribute `model.metrics_names` will give you
      the display labels for the outputs.
  """
  current_strategy = model._distribution_strategy

  # TODO(priyag, sourabhbajaj): Remove this when the codepaths are merged.
  if current_strategy.__class__.__name__ == 'TPUStrategy':
    return _experimental_test_loop(model, iterator, verbose, steps)

  if not model._grouped_model:
    clone_model_on_replicas(model, current_strategy)

  def _per_device_eval_function(model):
    model._make_eval_function()
    return (model._eval_function.inputs, model._eval_function.outputs,
            model._eval_function.updates_op,
            model._eval_function.session_kwargs)

  inputs, targets, sample_weights = _get_input_from_iterator(iterator, model)
  with current_strategy.scope():
    (grouped_inputs, grouped_outputs, grouped_updates,
     grouped_session_args) = current_strategy.call_for_each_replica(
         _per_device_eval_function, args=(model._grouped_model,))

    (all_inputs, all_outputs, all_updates,
     all_session_args) = distributed_training_utils.unwrap_values(
         current_strategy, grouped_inputs, grouped_outputs, grouped_updates,
         grouped_session_args, with_loss_tensor=True)

    dataset_inputs = distributed_training_utils.flatten_perdevice_values(
        current_strategy, inputs)
    dataset_targets = distributed_training_utils.flatten_perdevice_values(
        current_strategy, targets)

  distributed_test_function = K.function(
      all_inputs, all_outputs,
      updates=all_updates,
      name='distributed_test_function',
      **all_session_args)

  # We need to set sample_weights to None since there are sample weight
  # placeholders that are created with default values.
  sample_weights = [None for _ in range(
      len(model.outputs) * current_strategy.num_replicas_in_sync)]

  if not isinstance(K.learning_phase(), int):
    ins = dataset_inputs + dataset_targets + sample_weights + [0]
  else:
    ins = dataset_inputs + dataset_targets

  for m in model.stateful_metric_functions:
    m.reset_states()

  outs = []
  if verbose == 1:
    progbar = Progbar(target=steps)

  # Copy the weights from the original model to each of the replicated models.
  orig_model_weights = model.get_weights()
  distributed_model = current_strategy.unwrap(model._grouped_model)[0]
  distributed_training_utils.set_weights(
      current_strategy, distributed_model, orig_model_weights)

  assert steps is not None
  for step in range(steps):
    batch_outs = distributed_test_function(ins)
    if isinstance(batch_outs, list):
      if step == 0:
        outs = [0.] * len(batch_outs)
      outs[0] += batch_outs[0]  # index 0 = 'loss'
      outs[1:] = batch_outs[1:]
    else:
      if step == 0:
        outs.append(0.)
      outs[0] += batch_outs  # index 0 = 'loss'
    if verbose >= 1:
      progbar.update(step + 1)

  outs[0] /= steps  # index 0 = 'loss'
  if len(outs) == 1:
    return outs[0]
  return outs
Author: abhinav-upadhyay, Project: tensorflow, Lines of code: 95, Source: training_distributed.py
Example 13: fit_loop
def fit_loop(
    model,
    inputs,
    targets,
    epochs=100,
    verbose=1,
    callbacks=None,
    val_inputs=None,
    val_targets=None,
    initial_epoch=0,
    steps_per_epoch=None,
    validation_steps=None):
  """fit function when using DistributionStrategy for training.

  Arguments:
      model: Keras Model instance.
      inputs: List of input arrays.
      targets: List of target arrays.
      epochs: Number of times to iterate over the data
      verbose: Verbosity mode, 0, 1 or 2
      callbacks: List of callbacks to be called during training
      val_inputs: List of input arrays.
      val_targets: List of target arrays.
      initial_epoch: Epoch at which to start training
          (useful for resuming a previous training run)
      steps_per_epoch: Total number of steps (batches of samples)
          before declaring one epoch finished and starting the
          next epoch. Ignored with the default value of `None`.
      validation_steps: Number of steps to run validation for
          (only if doing validation from data tensors).
          Ignored with the default value of `None`.

  Returns:
      `History` object.

  Raises:
      ValueError: in case of invalid arguments.
  """
  current_strategy = model._distribution_strategy

  def _per_device_train_function(model):
    model._make_train_function()
    return (model.train_function.inputs,
            model.train_function.outputs,
            model.train_function.updates_op,
            model.train_function.session_kwargs)

  with current_strategy.scope():
    # Create train ops on each of the devices when we call
    # `_per_device_train_function`.
    (grouped_inputs, grouped_outputs, grouped_updates,
     grouped_session_args) = current_strategy.call_for_each_tower(
         _per_device_train_function, model._grouped_model)

    # Unwrap all the per device values returned from `call_for_each_tower`.
    # Unwrapping per device values gives you a list of values that can be
    # used to construct a new train function that is composed of update ops on
    # all the devices over which the model is distributed.
    (all_inputs, all_outputs, all_updates,
     all_session_args) = distributed_training_utils.unwrap_values(
         current_strategy, grouped_inputs, grouped_outputs,
         grouped_updates, grouped_session_args, with_loss_tensor=True)

    # Dataset inputs and targets are also per devices values that need to be
    # unwrapped.
    dataset_inputs = distributed_training_utils.flatten_perdevice_values(
        current_strategy, inputs)
    dataset_targets = distributed_training_utils.flatten_perdevice_values(
        current_strategy, targets)

  # Create a train function that is composed of all the parameters above.
  distributed_train_function = K.Function(
      all_inputs, all_outputs,
      updates=all_updates,
      name='distributed_train_function',
      **all_session_args)

  # We need to set sample_weights to None since there are sample weight
  # placeholders that are created with default values.
  sample_weights = [None for _ in range(len(model.outputs) *
                                        current_strategy.num_towers)]

  if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
    ins = dataset_inputs + dataset_targets + sample_weights + [1]
  else:
    ins = dataset_inputs + dataset_targets

  do_validation = False
  if validation_steps:
    do_validation = True
    if steps_per_epoch is None:
      raise ValueError('Can only use `validation_steps` '
                       'when doing step-wise '
                       'training, i.e. `steps_per_epoch` '
                       'must be set.')

  # Copy the weights from the original model to each of the replicated models.
  orig_model_weights = model.get_weights()
  with current_strategy.scope():
    distributed_model = current_strategy.unwrap(model._grouped_model)[0]
    distributed_training_utils.set_weights(
        current_strategy, distributed_model, orig_model_weights)
#......... part of the code is omitted here .........
Author: ZhangXinNan, Project: tensorflow, Lines of code: 101, Source: training_distributed.py
Example 14: predict_loop
def predict_loop(model, inputs, verbose=0, steps=None):
  """Abstract method to loop over some data in batches.

  Arguments:
      model: Keras Model instance.
      inputs: list of tensors to be fed to `f`.
      verbose: verbosity mode.
      steps: Total number of steps (batches of samples)
          before declaring `_predict_loop` finished.
          Ignored with the default value of `None`.

  Returns:
      Array of predictions (if the model has a single output)
      or list of arrays of predictions
      (if the model has multiple outputs).
  """
  current_strategy = model._distribution_strategy

  def _per_device_predict_function(model):
    model._make_predict_function()
    return (model.predict_function.inputs,
            model.predict_function.outputs,
            model.predict_function.updates_op,
            model.predict_function.session_kwargs)

  with current_strategy.scope():
    (grouped_inputs, grouped_outputs, grouped_updates,
     grouped_session_args) = current_strategy.call_for_each_tower(
         _per_device_predict_function, model._grouped_model)

    (all_inputs, all_outputs, all_updates,
     all_session_args) = distributed_training_utils.unwrap_values(
         current_strategy, grouped_inputs, grouped_outputs, grouped_updates,
         grouped_session_args)

    dataset_inputs = distributed_training_utils.flatten_perdevice_values(
        current_strategy, inputs)

  distributed_predict_function = K.Function(
      all_inputs, all_outputs,
      updates=all_updates,
      name='distributed_predict_function',
      **all_session_args)

  if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
    ins = dataset_inputs + [0]
  else:
    ins = dataset_inputs

  if verbose == 1:
    progbar = Progbar(target=steps)

  # Copy the weights from the original model to each of the replicated models.
  orig_model_weights = model.get_weights()
  with current_strategy.scope():
    distributed_model = current_strategy.unwrap(model._grouped_model)[0]
    distributed_training_utils.set_weights(
        current_strategy, distributed_model, orig_model_weights)

  if steps is not None:
    # Since we do not know how many samples we will see, we cannot pre-allocate
    # the returned Numpy arrays. Instead, we store one array per batch seen
    # and concatenate them upon returning.
    unconcatenated_outs = []
    for step in range(steps):
      batch_outs = distributed_predict_function(ins)
      if not isinstance(batch_outs, list):
        batch_outs = [batch_outs]
      if step == 0:
        for _ in batch_outs:
          unconcatenated_outs.append([])
      for i, batch_out in enumerate(batch_outs):
        unconcatenated_outs[i].append(batch_out)
      if verbose == 1:
        progbar.update(step + 1)
    if len(unconcatenated_outs) == 1:
      return np.concatenate(unconcatenated_outs[0], axis=0)
    return [
        np.concatenate(unconcatenated_outs[i], axis=0)
        for i in range(len(unconcatenated_outs))
    ]
Author: ZhangXinNan, Project: tensorflow, Lines of code: 80, Source: training_distributed.py
Example 15: test_loop
def test_loop(model, inputs, targets, verbose=0, steps=None):
  """evaluate method to validate a model that uses DistributionStrategy.

  Arguments:
      model: Keras Model instance.
      inputs: List of input arrays.
      targets: List of target arrays.
      verbose: verbosity mode.
      steps: Total number of steps (batches of samples)
          before declaring predictions finished.
          Ignored with the default value of `None`.

  Returns:
      Scalar loss (if the model has a single output and no metrics)
      or list of scalars (if the model has multiple outputs
      and/or metrics). The attribute `model.metrics_names` will give you
      the display labels for the scalar outputs.
  """
  current_strategy = model._distribution_strategy

  def _per_device_test_function(model):
    model._make_test_function()
    return (model.test_function.inputs,
            model.test_function.outputs,
            model.test_function.updates_op,
            model.test_function.session_kwargs)

  with current_strategy.scope():
    (grouped_inputs, grouped_outputs, grouped_updates,
     grouped_session_args) = current_strategy.call_for_each_tower(
         _per_device_test_function, model._grouped_model)

    (all_inputs, all_outputs, all_updates,
     all_session_args) = distributed_training_utils.unwrap_values(
         current_strategy, grouped_inputs, grouped_outputs, grouped_updates,
         grouped_session_args, with_loss_tensor=True)

    dataset_inputs = distributed_training_utils.flatten_perdevice_values(
        current_strategy, inputs)
    dataset_targets = distributed_training_utils.flatten_perdevice_values(
        current_strategy, targets)

  distributed_test_function = K.Function(
      all_inputs, all_outputs,
      updates=all_updates,
      name='distributed_test_function',
      **all_session_args)

  # We need to set sample_weights to None since there are sample weight
  # placeholders that are created with default values.
  sample_weights = [None for _ in range(len(model.outputs) *
                                        current_strategy.num_towers)]

  if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
    ins = dataset_inputs + dataset_targets + sample_weights + [0]
  else:
    ins = dataset_inputs + dataset_targets

  outs = []
  if verbose == 1:
    progbar = Progbar(target=steps)

  # Copy the weights from the original model to each of the replicated models.
  orig_model_weights = model.get_weights()
  with current_strategy.scope():
    distributed_model = current_strategy.unwrap(model._grouped_model)[0]
    distributed_training_utils.set_weights(
        current_strategy, distributed_model, orig_model_weights)

  if steps is not None:
    for step in range(steps):
      batch_outs = distributed_test_function(ins)
      batch_outs = _aggregate_metrics_across_towers(
          len(current_strategy._devices), model.metrics_names, batch_outs)
      if isinstance(batch_outs, list):
        if step == 0:
          for _ in enumerate(batch_outs):
            outs.append(0.)
        for i, batch_out in enumerate(batch_outs):
          outs[i] += batch_out
      else:
        if step == 0:
          outs.append(0.)
        outs[0] += batch_outs
      if verbose == 1:
        progbar.update(step + 1)
    for i in range(len(outs)):
      outs[i] /= steps
    if len(outs) == 1:
      return outs[0]
    return outs
Author: ZhangXinNan, Project: tensorflow, Lines of code: 90, Source: training_distributed.py
Note: the tensorflow.python.keras.engine.distributed_training_utils.unwrap_values examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and copyright of the source code remains with the original authors. Please consult the corresponding project's license before redistributing or using the code; reproduction without permission is prohibited.