This article collects typical usage examples of the Python function tensorflow.python.keras.backend.name_scope. If you have been wondering what name_scope does, how to call it, and what real-world uses look like, the curated examples below should help.
17 code examples of name_scope are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
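Before the examples, a minimal sketch of what name_scope does (not taken from the collection below, and assuming a TF 1.x-style graph context, since eagerly executed ops do not carry graph names): every op created inside the context manager gets the scope string prepended to its name.

import tensorflow as tf
from tensorflow.python.keras import backend as K

with tf.Graph().as_default():
  with K.name_scope('my_block'):
    x = tf.constant(1.0, name='x')
    y = tf.math.square(x, name='y')
  # The scope prefixes the op name: expected output "my_block/y:0".
  print(y.name)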
Example 1: __call__
def __call__(self, y_true, y_pred, sample_weight=None):
  """Invokes the `Loss` instance.

  Args:
    y_true: Ground truth values.
    y_pred: The predicted values.
    sample_weight: Optional `Tensor` whose rank is either 0, or the same rank
      as `y_true`, or is broadcastable to `y_true`. `sample_weight` acts as a
      coefficient for the loss. If a scalar is provided, then the loss is
      simply scaled by the given value. If `sample_weight` is a tensor of size
      `[batch_size]`, then the total loss for each sample of the batch is
      rescaled by the corresponding element in the `sample_weight` vector. If
      the shape of `sample_weight` matches the shape of `y_pred`, then the
      loss of each measurable element of `y_pred` is scaled by the
      corresponding value of `sample_weight`.

  Returns:
    Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same
    shape as `y_true`; otherwise, it is scalar.

  Raises:
    ValueError: If the shape of `sample_weight` is invalid.
  """
  # If we are wrapping a lambda function strip '<>' from the name as it is not
  # accepted in scope name.
  scope_name = 'lambda' if self.name == '<lambda>' else self.name
  graph_ctx = tf_utils.graph_context_for_symbolic_tensors(
      y_true, y_pred, sample_weight)
  with K.name_scope(scope_name or self.__class__.__name__), graph_ctx:
    losses = self.call(y_true, y_pred)
    return losses_utils.compute_weighted_loss(
        losses, sample_weight, reduction=self._get_reduction())
Contributor: aritratony | Project: tensorflow | Lines: 32 | Source: losses.py
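A hedged usage sketch for the __call__ above (TF 2.x eager execution assumed): invoking a built-in Loss subclass runs self.call inside a scope named after the loss, then applies the sample weights as documented.

import tensorflow as tf

mse = tf.keras.losses.MeanSquaredError()
y_true = tf.constant([[0.0], [1.0]])
y_pred = tf.constant([[0.5], [0.5]])
# A [batch_size] weight vector rescales each sample's loss before the
# default SUM_OVER_BATCH_SIZE reduction divides by the element count.
loss = mse(y_true, y_pred, sample_weight=tf.constant([1.0, 2.0]))
print(float(loss))  # (1.0*0.25 + 2.0*0.25) / 2 = 0.375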
Example 2: compute_weighted_loss
def compute_weighted_loss(losses,
                          sample_weight=None,
                          reduction=ReductionV2.SUM_OVER_BATCH_SIZE,
                          name=None):
  """Computes the weighted loss.

  Args:
    losses: `Tensor` of shape `[batch_size, d1, ... dN]`.
    sample_weight: Optional `Tensor` whose rank is either 0, or the same rank
      as `losses`, or is broadcastable to `losses`.
    reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss.
      Default value is `SUM_OVER_BATCH_SIZE`.
    name: Optional name for the op.

  Raises:
    ValueError: If the shape of `sample_weight` is not compatible with `losses`.

  Returns:
    Weighted loss `Tensor` of the same type as `losses`. If `reduction` is
    `NONE`, this has the same shape as `losses`; otherwise, it is scalar.
  """
  ReductionV2.validate(reduction)
  # If this function is called directly, then we just default 'AUTO' to
  # 'SUM_OVER_BATCH_SIZE'. Eg. Canned estimator use cases.
  if reduction == ReductionV2.AUTO:
    reduction = ReductionV2.SUM_OVER_BATCH_SIZE
  if sample_weight is None:
    sample_weight = 1.0
  with K.name_scope(name or 'weighted_loss'):
    # Save the `reduction` argument for loss normalization when distributing
    # to multiple replicas. Used only for estimator + v1 optimizer flow.
    ops.get_default_graph()._last_loss_reduction = reduction  # pylint: disable=protected-access

    # Update dimensions of `sample_weight` to match with `losses` if possible.
    losses, _, sample_weight = squeeze_or_expand_dimensions(
        losses, None, sample_weight)
    losses = ops.convert_to_tensor(losses)
    input_dtype = losses.dtype
    losses = math_ops.cast(losses, dtypes.float32)
    sample_weight = math_ops.cast(sample_weight, dtypes.float32)

    try:
      # Broadcast weights if possible.
      sample_weight = weights_broadcast_ops.broadcast_weights(
          sample_weight, losses)
    except ValueError:
      # Reduce values to same ndim as weight array.
      ndim = K.ndim(losses)
      weight_ndim = K.ndim(sample_weight)
      losses = K.mean(losses, axis=list(range(weight_ndim, ndim)))

    sample_weight.shape.assert_is_compatible_with(losses.shape)
    weighted_losses = math_ops.multiply(losses, sample_weight)
    # Apply reduction function to the individual weighted losses.
    loss = reduce_weighted_loss(weighted_losses, reduction)
    # Convert the result back to the input type.
    loss = math_ops.cast(loss, input_dtype)
    return loss
Contributor: aritratony | Project: tensorflow | Lines: 59 | Source: losses_utils.py
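A small sketch of the broadcasting behaviour documented above, written with public ops only (the private losses_utils helpers are internal to Keras): a rank-1 [batch_size] weight vector is expanded so it scales whole samples, and SUM_OVER_BATCH_SIZE then divides the weighted sum by the total element count.

import tensorflow as tf

losses = tf.constant([[0.2, 0.4], [0.6, 0.8]])  # shape [batch=2, d1=2]
sample_weight = tf.constant([1.0, 0.5])         # rank 1: one weight per sample

# Expand [batch] -> [batch, 1] so it broadcasts across the loss dimensions,
# mirroring what broadcast_weights does in the snippet above.
weighted = losses * tf.reshape(sample_weight, [-1, 1])

# SUM_OVER_BATCH_SIZE: sum of weighted losses divided by the number of
# loss elements (4 here), giving 1.3 / 4 = 0.325.
print(float(tf.reduce_sum(weighted) / tf.cast(tf.size(losses), tf.float32)))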
Example 3: _distributed_apply
def _distributed_apply(self, distribution, grads_and_vars, name):
  """`apply_gradients` using a `DistributionStrategy`."""
  reduced_grads = distribution.extended.batch_reduce_to(
      ds_reduce_util.ReduceOp.SUM, grads_and_vars)
  var_list = [v for _, v in grads_and_vars]
  grads_and_vars = zip(reduced_grads, var_list)

  def apply_grad_to_update_var(var, grad):
    """Apply gradient to variable."""
    if isinstance(var, ops.Tensor):
      raise NotImplementedError("Trying to update a Tensor ", var)
    if isinstance(grad, ops.IndexedSlices):
      if var.constraint is not None:
        raise RuntimeError(
            "Cannot use a constraint function on a sparse variable.")
      return self._resource_apply_sparse_duplicate_indices(
          grad.values, var, grad.indices)
    update_op = self._resource_apply_dense(grad, var)
    if var.constraint is not None:
      with ops.control_dependencies([update_op]):
        return var.assign(var.constraint(var))
    else:
      return update_op

  update_ops = []
  with backend.name_scope(name or self._name):
    for grad, var in grads_and_vars:
      scope_name = ("" if ops.executing_eagerly_outside_functions() else
                    "_" + var.op.name)
      with backend.name_scope("update" + scope_name):
        update_ops.extend(
            distribution.extended.update(
                var, apply_grad_to_update_var, args=(grad,), group=False))

    any_symbolic = any(isinstance(i, ops.Operation) or
                       tf_utils.is_symbolic_tensor(i) for i in update_ops)
    if not context.executing_eagerly() or any_symbolic:
      # If the current context is graph mode or any of the update ops are
      # symbolic then the step update should be carried out under a graph
      # context. (eager updates execute immediately)
      with ops._get_graph_from_inputs(update_ops).as_default():  # pylint: disable=protected-access
        with ops.control_dependencies(update_ops):
          return self._iterations.assign_add(1).op
    return self._iterations.assign_add(1)
Contributor: aritratony | Project: tensorflow | Lines: 45 | Source: optimizer_v2.py
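A hedged usage sketch: user code never calls _distributed_apply directly; it is reached through the public apply_gradients when the optimizer runs under a tf.distribute strategy (recent TF 2.x assumed for strategy.run).

import tensorflow as tf

strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
  v = tf.Variable(1.0)
  opt = tf.keras.optimizers.SGD(learning_rate=0.1)

@tf.function
def train_step():
  with tf.GradientTape() as tape:
    loss = v * v
  grads = tape.gradient(loss, [v])
  # Inside a replica context this routes through _distributed_apply,
  # which sum-reduces gradients across replicas before updating `v`.
  opt.apply_gradients(zip(grads, [v]))

strategy.run(train_step)
print(v.numpy())  # 1.0 - 0.1 * 2.0 = 0.8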
Example 4: __init__
def __init__(self, optimizer, iterations=None):  # pylint: disable=super-init-not-called
  self.optimizer = optimizer
  self._track_checkpointable(optimizer, name='optimizer')
  if iterations is None:
    with K.name_scope(self.__class__.__name__):
      self.iterations = K.variable(0, dtype='int64', name='iterations')
  else:
    self.iterations = iterations
  self._track_checkpointable(self.iterations, name='global_step')
Contributor: zhaoyongke | Project: tensorflow | Lines: 9 | Source: optimizers.py
Example 5: __init__
def __init__(self, lr=0.01, momentum=0., decay=0., nesterov=False, **kwargs):
  super(SGD, self).__init__(**kwargs)
  with K.name_scope(self.__class__.__name__):
    self.iterations = K.variable(0, dtype='int64', name='iterations')
    self.lr = K.variable(lr, name='lr')
    self.momentum = K.variable(momentum, name='momentum')
    self.decay = K.variable(decay, name='decay')
  self.initial_decay = decay
  self.nesterov = nesterov
Contributor: sonnyhu | Project: tensorflow | Lines: 9 | Source: optimizers.py
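The name_scope(self.__class__.__name__) above is what gives optimizer state readable, grouped names in the graph and in TensorBoard. A minimal sketch of the effect (graph mode assumed; the exact ":0" suffix is an assumption about how the runtime names variables):

import tensorflow as tf
from tensorflow.python.keras import backend as K

with tf.Graph().as_default():
  with K.name_scope('SGD'):
    lr = K.variable(0.01, name='lr')
  # The class-name scope prefixes the variable name,
  # so this prints something like "SGD/lr:0".
  print(lr.name)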
Example 6: _separable_conv_block
def _separable_conv_block(ip,
                          filters,
                          kernel_size=(3, 3),
                          strides=(1, 1),
                          block_id=None):
  """Adds 2 blocks of [relu-separable conv-batchnorm].

  Arguments:
    ip: Input tensor
    filters: Number of output filters per layer
    kernel_size: Kernel size of separable convolutions
    strides: Strided convolution for downsampling
    block_id: String block_id

  Returns:
    A Keras tensor
  """
  channel_dim = 1 if K.image_data_format() == 'channels_first' else -1

  with K.name_scope('separable_conv_block_%s' % block_id):
    x = Activation('relu')(ip)
    x = SeparableConv2D(
        filters,
        kernel_size,
        strides=strides,
        name='separable_conv_1_%s' % block_id,
        padding='same',
        use_bias=False,
        kernel_initializer='he_normal')(x)
    x = BatchNormalization(
        axis=channel_dim,
        momentum=0.9997,
        epsilon=1e-3,
        name='separable_conv_1_bn_%s' % block_id)(x)
    x = Activation('relu')(x)
    x = SeparableConv2D(
        filters,
        kernel_size,
        name='separable_conv_2_%s' % block_id,
        padding='same',
        use_bias=False,
        kernel_initializer='he_normal')(x)
    x = BatchNormalization(
        axis=channel_dim,
        momentum=0.9997,
        epsilon=1e-3,
        name='separable_conv_2_bn_%s' % block_id)(x)
  return x
Contributor: Huoxubeiyin | Project: tensorflow | Lines: 52 | Source: nasnet.py
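A hypothetical usage sketch: _separable_conv_block is private to keras.applications.nasnet, so this assumes the function above has been pasted into the same file along with the layer imports it relies on. The block maps any 4-D tensor to one with `filters` channels, with every op grouped under "separable_conv_block_<block_id>".

from tensorflow.keras import Input
from tensorflow.keras import backend as K
from tensorflow.keras.layers import (Activation, BatchNormalization,
                                     SeparableConv2D)

ip = Input(shape=(32, 32, 64))
x = _separable_conv_block(ip, filters=128, kernel_size=(3, 3),
                          strides=(2, 2), block_id='demo')
# Stride 2 with 'same' padding halves H and W: (None, 16, 16, 128).
print(x.shape)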
Example 7: _eager_metrics_fn
def _eager_metrics_fn(model, outputs, targets):
  """Calculates the metrics for each output of the given model.

  Arguments:
    model: The model on which metrics are being calculated.
    outputs: The outputs of the given model.
    targets: The predictions or targets of the given model.

  Returns:
    Returns the metric names and metric results for each output of the model.
  """
  metric_names = []
  metric_results = []
  if not isinstance(outputs, list):
    outputs = [outputs]

  if not isinstance(targets, list):
    targets = [targets]

  for i in range(len(model.outputs)):
    output_metrics = model.nested_metrics[i]
    for nested_output_metric in output_metrics:
      metric_name, metric_fn = _get_metrics_info(
          nested_output_metric, backend.int_shape(model.outputs[i]),
          model.loss_functions[i])

      if len(model.output_names) > 1:
        metric_name = model.output_names[i] + '_' + metric_name
        if metric_name not in model.metrics_names:
          model.metrics_names.append(metric_name)

      with backend.name_scope(metric_name):
        metric_result = metric_fn(targets[i], outputs[i])
        metric_names.append(metric_name)
        metric_results.append(backend.mean(metric_result))

  return metric_results
Contributor: didukhle | Project: tensorflow | Lines: 37 | Source: training_eager.py
Example 8: _model_loss
def _model_loss(model, inputs, targets, sample_weights=None, training=False):
  """Calculates the loss for a given model.

  Arguments:
    model: The model on which metrics are being calculated.
    inputs: Either a dictionary of inputs to the model or a list of input
      arrays.
    targets: List of target arrays.
    sample_weights: Optional list of sample weight arrays.
    training: Whether the model should be run in inference or training mode.

  Returns:
    Returns the model output, total loss, loss value calculated using the
    specified loss function and masks for each output. The total loss includes
    regularization losses and applies masking and sample weighting
    to the loss value.
  """
  total_loss = 0
  kwargs = {}
  if model._expects_training_arg:
    kwargs['training'] = training
  if len(inputs) == 1 and not isinstance(inputs, dict):
    inputs = inputs[0]

  if model._compute_output_and_mask_jointly:
    outs, masks = model._call_and_compute_mask(inputs, **kwargs)
    masks = generic_utils.to_list(masks)
  else:
    outs = model.call(inputs, **kwargs)
    masks = None

  outs = generic_utils.to_list(outs)
  if masks is None:
    masks = [None for _ in outs]
  targets = generic_utils.to_list(targets)

  loss_metrics = []
  with backend.name_scope('loss'):
    for i, loss_fn in enumerate(model.loss_functions):
      if sample_weights:
        weights = sample_weights[i]
      else:
        weights = None
      mask = masks[i]

      weighted_masked_fn = training_utils.weighted_masked_objective(loss_fn)
      with backend.name_scope(model.output_names[i] + '_loss'):
        output_loss = weighted_masked_fn(
            targets[i], outs[i], weights, mask=mask)
      # If the number of outputs is 1 then we don't append the loss metric
      # associated with each model output. When there are multiple outputs
      # associated with a model, each output's loss is calculated and returned
      # as part of the loss_metrics.
      if len(model.outputs) > 1:
        loss_metrics.append(backend.mean(output_loss))

      loss_weight = model.loss_weights_list[i]
      if total_loss is None:
        total_loss = loss_weight * output_loss
      else:
        total_loss += loss_weight * output_loss
    total_loss = backend.mean(total_loss)

  # Add regularization losses
  custom_losses = []
  for layer in model.layers:
    if layer.losses:
      custom_losses += layer.losses

  if custom_losses:
    total_loss += sum(custom_losses)

  return outs, total_loss, loss_metrics, masks
Contributor: ThunderQi | Project: tensorflow | Lines: 73 | Source: training_eager.py
Example 9: _eager_loss_fn
def _eager_loss_fn(outputs, targets, loss_fn, output_name):
  with backend.name_scope(output_name + '_loss'):
    loss = loss_fn(targets, outputs)
  return loss
Contributor: ThunderQi | Project: tensorflow | Lines: 4 | Source: training_eager.py
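A hedged usage sketch for the tiny helper above (TF 2.x eager assumed, with the function pasted alongside `from tensorflow.python.keras import backend`): function-style losses take (y_true, y_pred), which matches the loss_fn(targets, outputs) call order, and the ops land under "<output_name>_loss".

import tensorflow as tf

loss = _eager_loss_fn(
    outputs=tf.constant([0.5, 0.5]),
    targets=tf.constant([0.0, 1.0]),
    loss_fn=tf.keras.losses.mean_squared_error,
    output_name='dense_1')
print(float(loss))  # mean((0-0.5)^2, (1-0.5)^2) = 0.25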
Example 10: _model_loss
def _model_loss(model,
                inputs,
                targets,
                output_loss_metrics=None,
                sample_weights=None,
                training=False):
  """Calculates the loss for a given model.

  Arguments:
    model: The model on which metrics are being calculated.
    inputs: Either a dictionary of inputs to the model or a list of input
      arrays.
    targets: List of target arrays.
    output_loss_metrics: List of metrics that are used to aggregated output
      loss values.
    sample_weights: Optional list of sample weight arrays.
    training: Whether the model should be run in inference or training mode.

  Returns:
    Returns the model output, total loss, loss value calculated using the
    specified loss function and masks for each output. The total loss includes
    regularization losses and applies masking and sample weighting
    to the loss value.
  """
  total_loss = 0
  kwargs = {}
  if model._expects_training_arg:
    kwargs['training'] = training
  if len(inputs) == 1 and not isinstance(inputs, dict):
    inputs = inputs[0]

  if model._compute_output_and_mask_jointly:
    outs, masks = model._call_and_compute_mask(inputs, **kwargs)
    masks = nest.flatten(masks)
  else:
    outs = model.call(inputs, **kwargs)
    masks = None

  outs = nest.flatten(outs)
  if masks is None:
    masks = [None for _ in outs]
  targets = nest.flatten(targets)

  loss_metrics = []
  aggregated_loss_metrics = []
  with backend.name_scope('loss'):
    for i, loss_fn in enumerate(model.loss_functions):
      if sample_weights:
        weights = sample_weights[i]
      else:
        weights = None
      mask = masks[i]
      with backend.name_scope(model.output_names[i] + '_loss'):
        if isinstance(loss_fn, losses_module.Loss):
          if mask is not None:
            mask = math_ops.cast(mask, outs[i].dtype)
            # Update weights with mask.
            if weights is None:
              weights = mask
            else:
              # Update dimensions of weights to match with mask if possible.
              mask, _, weights = squeeze_or_expand_dimensions(
                  mask, None, weights)
              weights *= mask
          output_loss = loss_fn(targets[i], outs[i], sample_weight=weights)
        else:
          weighted_masked_fn = training_utils.weighted_masked_objective(loss_fn)
          output_loss = weighted_masked_fn(
              targets[i], outs[i], weights, mask=mask)
      # If the number of outputs is 1 then we don't append the loss metric
      # associated with each model output. When there are multiple outputs
      # associated with a model, each output's loss is calculated and returned
      # as part of the loss_metrics.
      if len(model.outputs) > 1:
        loss_metrics.append(backend.mean(output_loss))

        if output_loss_metrics is not None:
          # Keep track of the stateful loss result.
          aggregated_loss_metrics.append(
              training_utils.call_metric_function(
                  output_loss_metrics[i],
                  targets[i],
                  outs[i],
                  weights=weights,
                  mask=mask))

      loss_weight = model.loss_weights_list[i]
      if total_loss is None:
        total_loss = loss_weight * output_loss
      else:
        total_loss += loss_weight * output_loss
    total_loss = backend.mean(total_loss)

  # Add regularization losses
  custom_losses = model.losses
  if custom_losses:
    total_loss += math_ops.add_n(custom_losses)
  model._clear_losses()
#......... part of the code omitted .........
Contributor: Wajih-O | Project: tensorflow | Lines: 101 | Source: training_eager.py
Example 11: _model_loss
def _model_loss(model, inputs, targets, sample_weights=None, training=False):
  """Calculates the loss for a given model.

  Arguments:
    model: The model on which metrics are being calculated.
    inputs: List of input arrays.
    targets: List of target arrays.
    sample_weights: Optional list of sample weight arrays.
    training: Whether the model should be run in inference or training mode.

  Returns:
    Returns the model output, total loss and loss value calculated using the
    specified loss function. The total loss includes regularization losses and
    applies masking and sample weighting to the loss value.
  """
  total_loss = 0
  if len(inputs) == 1:
    if model._expects_training_arg:
      outs = model.call(inputs[0], training=training)
    else:
      outs = model.call(inputs[0])
  else:
    if model._expects_training_arg:
      outs = model.call(inputs, training=training)
    else:
      outs = model.call(inputs)
  if not isinstance(outs, list):
    outs = [outs]

  if not isinstance(targets, list):
    targets = [targets]

  loss_metrics = []
  with backend.name_scope('loss'):
    for i, loss_fn in enumerate(model.loss_functions):
      if sample_weights:
        weights = sample_weights[i]
      else:
        weights = None

      # TODO(fchollet): support masking; in practice `_keras_mask` is never
      # set in this context currently.
      mask = outs[i]._keras_mask

      weighted_masked_fn = training_utils.weighted_masked_objective(loss_fn)
      with backend.name_scope(model.output_names[i] + '_loss'):
        output_loss = weighted_masked_fn(
            targets[i], outs[i], weights, mask=mask)
      # If the number of outputs is 1 then we don't append the loss metric
      # associated with each model output. When there are multiple outputs
      # associated with a model, each output's loss is calculated and returned
      # as part of the loss_metrics.
      if len(model.outputs) > 1:
        loss_metrics.append(backend.mean(output_loss))

      loss_weight = model.loss_weights_list[i]
      if total_loss is None:
        total_loss = loss_weight * output_loss
      else:
        total_loss += loss_weight * output_loss
    total_loss = backend.mean(total_loss)

  # Add regularization losses
  custom_losses = []
  for layer in model.layers:
    if layer.losses:
      custom_losses += layer.losses

  if custom_losses:
    total_loss += sum(custom_losses)

  return outs, total_loss, loss_metrics
Contributor: didukhle | Project: tensorflow | Lines: 72 | Source: training_eager.py
Example 12: _num_elements
def _num_elements(losses):
  """Computes the number of elements in `losses` tensor."""
  with K.name_scope('num_elements') as scope:
    return math_ops.cast(array_ops.size(losses, name=scope), dtype=losses.dtype)
Contributor: aritratony | Project: tensorflow | Lines: 4 | Source: losses_utils.py
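For comparison, a sketch of the same computation with public ops only: tf.size counts the elements, and the cast keeps the count in the dtype of `losses` so a later division (e.g. for a safe mean) stays in one dtype.

import tensorflow as tf

losses = tf.zeros([4, 3])
num = tf.cast(tf.size(losses), losses.dtype)
print(float(num))  # 12.0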
Example 13: _model_loss
def _model_loss(model,
                inputs,
                targets,
                output_loss_metrics=None,
                sample_weights=None,
                training=False):
  """Calculates the loss for a given model.

  Arguments:
    model: The model on which metrics are being calculated.
    inputs: Either a dictionary of inputs to the model or a list of input
      arrays.
    targets: List of target arrays.
    output_loss_metrics: List of metrics that are used to aggregated output
      loss values.
    sample_weights: Optional list of sample weight arrays.
    training: Whether the model should be run in inference or training mode.

  Returns:
    Returns the model output, total loss, loss value calculated using the
    specified loss function and masks for each output. The total loss includes
    regularization losses and applies masking and sample weighting
    to the loss value.
  """
  # Used to keep track of the total loss value (stateless).
  # eg., total_loss = loss_weight_1 * output_1_loss_fn(...) +
  #                   loss_weight_2 * output_2_loss_fn(...) +
  #                   layer losses.
  total_loss = 0
  kwargs = {}
  if model._expects_training_arg:
    kwargs['training'] = training
  if len(inputs) == 1 and not isinstance(inputs, dict):
    inputs = inputs[0]

  # Allow mixed `NumPy` and `EagerTensor` input here.
  if any(
      isinstance(input_t, (np.ndarray, float, int))
      for input_t in nest.flatten(inputs)):
    inputs = nest.map_structure(ops.convert_to_tensor, inputs)

  outs = model(inputs, **kwargs)
  outs = nest.flatten(outs)

  # `None` by default for `EagerTensors`.
  masks = [t._keras_mask for t in outs]
  targets = nest.flatten(targets)

  # Used to keep track of individual output losses (stateless).
  output_losses = []
  # Used to keep track of individual output losses (stateful).
  aggregated_output_losses = []

  with backend.name_scope('loss'):
    for i, loss_fn in enumerate(model.loss_functions):
      weights = sample_weights[i] if sample_weights else None
      mask = masks[i]
      with backend.name_scope(model.output_names[i] + '_loss'):
        if mask is not None:
          mask = math_ops.cast(mask, outs[i].dtype)
          # Update weights with mask.
          if weights is None:
            weights = mask
          else:
            # Update dimensions of weights to match with mask if possible.
            mask, _, weights = (
                losses_utils.squeeze_or_expand_dimensions(mask, None, weights))
            weights *= mask

        # Reset reduction on the loss so that we can get the per sample loss
        # value. We use this to get both the stateless and stateful loss
        # values without having to compute the underlying loss function
        # twice.
        weighted_losses = None
        if hasattr(loss_fn, 'reduction'):
          current_loss_reduction = loss_fn.reduction
          loss_fn.reduction = losses_utils.ReductionV2.NONE
          weighted_losses = loss_fn(targets[i], outs[i], sample_weight=weights)
          loss_fn.reduction = current_loss_reduction

          # Compute the stateless loss value.
          output_loss = losses_utils.reduce_weighted_loss(weighted_losses)
        else:
          # Compute the stateless loss value for a custom loss class.
          # Here we assume that the class takes care of loss reduction
          # because if this class returns a vector value we cannot
          # differentiate between use case where a custom optimizer
          # expects a vector loss value vs unreduced per-sample loss value.
          output_loss = loss_fn(targets[i], outs[i], sample_weight=weights)

      # If the number of outputs is 1 then we don't append the loss metric
      # associated with each model output. When there are multiple outputs
      # associated with a model, each output's loss is calculated and returned
      # as part of the loss_metrics.
      if len(model.outputs) > 1:
        output_losses.append(backend.mean(output_loss))

      if output_loss_metrics is not None:
        # Compute the stateful loss value.
        if weighted_losses is not None:
          aggregated_output_loss = output_loss_metrics[i](weighted_losses)
#......... part of the code omitted .........
Contributor: kylin9872 | Project: tensorflow | Lines: 101 | Source: training_eager.py
Example 14: _reduction_a_cell
def _reduction_a_cell(ip, p, filters, block_id=None):
  """Adds a Reduction cell for NASNet-A (Fig. 4 in the paper).

  Arguments:
    ip: Input tensor `x`
    p: Input tensor `p`
    filters: Number of output filters
    block_id: String block_id

  Returns:
    A Keras tensor
  """
  channel_dim = 1 if K.image_data_format() == 'channels_first' else -1

  with K.name_scope('reduction_A_block_%s' % block_id):
    p = _adjust_block(p, ip, filters, block_id)

    h = Activation('relu')(ip)
    h = Conv2D(
        filters, (1, 1),
        strides=(1, 1),
        padding='same',
        name='reduction_conv_1_%s' % block_id,
        use_bias=False,
        kernel_initializer='he_normal')(h)
    h = BatchNormalization(
        axis=channel_dim,
        momentum=0.9997,
        epsilon=1e-3,
        name='reduction_bn_1_%s' % block_id)(h)

    with K.name_scope('block_1'):
      x1_1 = _separable_conv_block(
          h,
          filters, (5, 5),
          strides=(2, 2),
          block_id='reduction_left1_%s' % block_id)
      x1_2 = _separable_conv_block(
          p,
          filters, (7, 7),
          strides=(2, 2),
          block_id='reduction_1_%s' % block_id)
      x1 = add([x1_1, x1_2], name='reduction_add_1_%s' % block_id)

    with K.name_scope('block_2'):
      x2_1 = MaxPooling2D(
          (3, 3),
          strides=(2, 2),
          padding='same',
          name='reduction_left2_%s' % block_id)(h)
      x2_2 = _separable_conv_block(
          p,
          filters, (7, 7),
          strides=(2, 2),
          block_id='reduction_right2_%s' % block_id)
      x2 = add([x2_1, x2_2], name='reduction_add_2_%s' % block_id)

    with K.name_scope('block_3'):
      x3_1 = AveragePooling2D(
          (3, 3),
          strides=(2, 2),
          padding='same',
          name='reduction_left3_%s' % block_id)(h)
      x3_2 = _separable_conv_block(
          p,
          filters, (5, 5),
          strides=(2, 2),
          block_id='reduction_right3_%s' % block_id)
      x3 = add([x3_1, x3_2], name='reduction_add3_%s' % block_id)

    with K.name_scope('block_4'):
      x4 = AveragePooling2D(
          (3, 3),
          strides=(1, 1),
          padding='same',
          name='reduction_left4_%s' % block_id)(x1)
      x4 = add([x2, x4])

    with K.name_scope('block_5'):
      x5_1 = _separable_conv_block(
          x1, filters, (3, 3), block_id='reduction_left4_%s' % block_id)
      x5_2 = MaxPooling2D(
          (3, 3),
          strides=(2, 2),
          padding='same',
          name='reduction_right5_%s' % block_id)(h)
      x5 = add([x5_1, x5_2], name='reduction_add4_%s' % block_id)

    x = concatenate(
        [x2, x3, x4, x5],
        axis=channel_dim,
        name='reduction_concat_%s' % block_id)
    return x, ip
Contributor: Huoxubeiyin | Project: tensorflow | Lines: 99 | Source: nasnet.py
Example 15: _adjust_block
def _adjust_block(p, ip, filters, block_id=None):
  """Adjusts the input `previous path` to match the shape of the `input`.

  Used in situations where the output number of filters needs to be changed.

  Arguments:
    p: Input tensor which needs to be modified
    ip: Input tensor whose shape needs to be matched
    filters: Number of output filters to be matched
    block_id: String block_id

  Returns:
    Adjusted Keras tensor
  """
  channel_dim = 1 if K.image_data_format() == 'channels_first' else -1
  img_dim = 2 if K.image_data_format() == 'channels_first' else -2

  ip_shape = K.int_shape(ip)

  if p is not None:
    p_shape = K.int_shape(p)

  with K.name_scope('adjust_block'):
    if p is None:
      p = ip

    elif p_shape[img_dim] != ip_shape[img_dim]:
      with K.name_scope('adjust_reduction_block_%s' % block_id):
        p = Activation('relu', name='adjust_relu_1_%s' % block_id)(p)
        p1 = AveragePooling2D(
            (1, 1),
            strides=(2, 2),
            padding='valid',
            name='adjust_avg_pool_1_%s' % block_id)(p)
        p1 = Conv2D(
            filters // 2, (1, 1),
            padding='same',
            use_bias=False,
            name='adjust_conv_1_%s' % block_id,
            kernel_initializer='he_normal')(p1)

        p2 = ZeroPadding2D(padding=((0, 1), (0, 1)))(p)
        p2 = Cropping2D(cropping=((1, 0), (1, 0)))(p2)
        p2 = AveragePooling2D(
            (1, 1),
            strides=(2, 2),
            padding='valid',
            name='adjust_avg_pool_2_%s' % block_id)(p2)
        p2 = Conv2D(
            filters // 2, (1, 1),
            padding='same',
            use_bias=False,
            name='adjust_conv_2_%s' % block_id,
            kernel_initializer='he_normal')(p2)

        p = concatenate([p1, p2], axis=channel_dim)
        p = BatchNormalization(
            axis=channel_dim,
            momentum=0.9997,
            epsilon=1e-3,
            name='adjust_bn_%s' % block_id)(p)

    elif p_shape[channel_dim] != filters:
      with K.name_scope('adjust_projection_block_%s' % block_id):
        p = Activation('relu')(p)
        p = Conv2D(
            filters, (1, 1),
            strides=(1, 1),
            padding='same',
            name='adjust_conv_projection_%s' % block_id,
            use_bias=False,
            kernel_initializer='he_normal')(p)
        p = BatchNormalization(
            axis=channel_dim,
            momentum=0.9997,
            epsilon=1e-3,
            name='adjust_bn_%s' % block_id)(p)
  return p
Contributor: Huoxubeiyin | Project: tensorflow | Lines: 86 | Source: nasnet.py
Example 16: _model_loss
def _model_loss(model,
                inputs,
                targets,
                output_loss_metrics=None,
                sample_weights=None,
                training=False):
  """Calculates the loss for a given model.

  Arguments:
    model: The model on which metrics are being calculated.
    inputs: Either a dictionary of inputs to the model or a list of input
      arrays.
    targets: List of target arrays.
    output_loss_metrics: List of metrics that are used to aggregated output
      loss values.
    sample_weights: Optional list of sample weight arrays.
    training: Whether the model should be run in inference or training mode.

  Returns:
    Returns the model output, total loss, loss value calculated using the
    specified loss function and masks for each output. The total loss includes
    regularization losses and applies masking and sample weighting
    to the loss value.
  """
  # TODO(psv): Dedup code here with graph mode prepare_total_loss() fn.
  # Used to keep track of the total loss value (stateless).
  # eg., total_loss = loss_weight_1 * output_1_loss_fn(...) +
  #                   loss_weight_2 * output_2_loss_fn(...) +
  #                   layer losses.
  total_loss = 0
  kwargs = {}
  if model._expects_training_arg:
    kwargs['training'] = training
  if len(inputs) == 1 and not isinstance(inputs, dict):
    inputs = inputs[0]

  # Allow mixed `NumPy` and `EagerTensor` input here.
  if any(
      isinstance(input_t, (np.ndarray, float, int))
      for input_t in nest.flatten(inputs)):
    inputs = nest.map_structure(ops.convert_to_tensor, inputs)

  outs = model(inputs, **kwargs)
  outs = nest.flatten(outs)

  masks = [getattr(t, '_keras_mask', None) for t in outs]
  targets = nest.flatten(targets)

  # Used to keep track of individual output losses.
  output_losses = []

  with backend.name_scope('loss'):
    loss_fns = [
        loss_fn for loss_fn in model.loss_functions if loss_fn is not None
    ]
    for i, loss_fn in enumerate(loss_fns):
      weights = sample_weights[i] if sample_weights else None
      mask = masks[i]
      with backend.name_scope(model.output_names[i] + '_loss'):
        if mask is not None:
          mask = math_ops.cast(mask, outs[i].dtype)
          # Update weights with mask.
          if weights is None:
            weights = mask
          else:
            # Update dimensions of weights to match with mask if possible.
            mask, _, weights = (
                losses_utils.squeeze_or_expand_dimensions(mask, None, weights))
            weights *= mask

        weighted_losses = None
        if hasattr(loss_fn, 'reduction'):
          per_sample_losses = loss_fn.call(targets[i], outs[i])
          weighted_losses = losses_utils.compute_weighted_loss(
              per_sample_losses,
              sample_weight=weights,
              reduction=losses_utils.ReductionV2.NONE)
          loss_reduction = loss_fn.reduction

          # `AUTO` loss reduction defaults to `SUM_OVER_BATCH_SIZE` for all
          # compile use cases.
          if loss_reduction == losses_utils.ReductionV2.AUTO:
            loss_reduction = losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE

          # Compute the stateless loss value.
          output_loss = losses_utils.reduce_weighted_loss(
              weighted_losses, reduction=loss_reduction)
          if loss_reduction == losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE:
            output_loss = losses_utils.scale_loss_for_distribution(output_loss)
        else:
          # Compute the stateless loss value for a custom loss class.
          # Here we assume that the class takes care of loss reduction
          # because if this class returns a vector value we cannot
          # differentiate between use case where a custom optimizer
          # expects a vector loss value vs unreduced per-sample loss value.
          output_loss = loss_fn(targets[i], outs[i], sample_weight=weights)
          # For custom losses we assume reduction was mean.
          output_loss = losses_utils.scale_loss_for_distribution(output_loss)

      # If the number of outputs is 1 then we don't append the loss metric
#......... part of the code omitted .........
Contributor: aritratony | Project: tensorflow | Lines: 101 | Source: training_eager.py
Example 17: build
def build(self, input_shape):
  with K.name_scope(self.forward_layer.name):
    self.forward_layer.build(input_shape)
  with K.name_scope(self.backward_layer.name):
    self.backward_layer.build(input_shape)
  self.built = True
Contributor: aritratony | Project: tensorflow | Lines: 6 | Source: wrappers.py
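A hedged sketch of why Bidirectional builds each wrapped layer under its own scope: forward and backward weights end up clearly separated in the variable names (the exact names vary by TF version, so treat the expected output as an assumption).

import tensorflow as tf

layer = tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(8))
layer.build(input_shape=(None, 10, 4))
# Expect names containing "forward_lstm/..." and "backward_lstm/...".
print([w.name for w in layer.weights[:2]])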
Note: the tensorflow.python.keras.backend.name_scope examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub/MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Please consult the corresponding project's license before redistributing or using the code; do not reproduce this compilation without permission.