
Python backend.mean Function Code Examples


This article compiles typical usage examples of the mean function from tensorflow.python.keras._impl.keras.backend in Python. If you have been wondering exactly what mean does, how to call it, or how it is used in practice, the curated code examples below should help.



The following presents 20 code examples of the mean function, sorted by popularity by default.
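
Before the examples, here is a minimal sketch of the function itself. It uses the public tf.keras.backend alias, which in the TensorFlow versions these examples target re-exports the same mean as the internal _impl path; treat it as an illustration, not part of the compiled sources:

import tensorflow as tf
from tensorflow.keras import backend as K

x = K.constant([[1., 2.], [3., 4.]])
print(K.eval(K.mean(x)))            # 2.5 -- mean over all elements
print(K.eval(K.mean(x, axis=-1)))   # [1.5 3.5] -- per-row mean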

Example 1: weighted

  def weighted(y_true, y_pred, weights, mask=None):
    """Wrapper function.

    Arguments:
        y_true: `y_true` argument of `fn`.
        y_pred: `y_pred` argument of `fn`.
        weights: Weights tensor.
        mask: Mask tensor.

    Returns:
        Scalar tensor.
    """
    # score_array has ndim >= 2
    score_array = fn(y_true, y_pred)
    if mask is not None:
      # Cast the mask to floatX to avoid float64 upcasting in theano
      mask = math_ops.cast(mask, K.floatx())
      # mask should have the same shape as score_array
      score_array *= mask
      #  the loss per batch should be proportional
      #  to the number of unmasked samples.
      score_array /= K.mean(mask)

    # apply sample weighting
    if weights is not None:
      # reduce score_array to same ndim as weight array
      ndim = K.ndim(score_array)
      weight_ndim = K.ndim(weights)
      score_array = K.mean(score_array, axis=list(range(weight_ndim, ndim)))
      score_array *= weights
      score_array /= K.mean(
          math_ops.cast(math_ops.not_equal(weights, 0), K.floatx()))
    return K.mean(score_array)
Author: syed-ahmed | Project: tensorflow | Lines: 33 | Source: training_utils.py
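
Why divide by K.mean(mask)? A small hand-check (a sketch using the public tf.keras.backend alias, not part of the original source) shows that the rescaling keeps the batch loss proportional to the number of unmasked samples:

import tensorflow as tf
from tensorflow.keras import backend as K

score = K.constant([1., 1., 1., 1.])  # per-sample losses
mask = K.constant([1., 1., 0., 0.])   # half the samples are masked out

# Zeroing two samples alone would halve the batch mean; dividing by
# K.mean(mask) (here 0.5) compensates, so the batch mean stays 1.0.
rescaled = score * mask / K.mean(mask)
print(K.eval(K.mean(rescaled)))  # 1.0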


Example 2: logcosh

def logcosh(y_true, y_pred):
  """Logarithm of the hyperbolic cosine of the prediction error.

  `log(cosh(x))` is approximately equal to `(x ** 2) / 2` for small `x` and
  to `abs(x) - log(2)` for large `x`. This means that 'logcosh' works mostly
  like the mean squared error, but will not be so strongly affected by the
  occasional wildly incorrect prediction.

  Arguments:
      y_true: tensor of true targets.
      y_pred: tensor of predicted targets.

  Returns:
      Tensor with one scalar loss entry per sample.
  """

  def _logcosh(x):
    return x + nn.softplus(-2. * x) - math_ops.log(2.)

  return K.mean(_logcosh(y_pred - y_true), axis=-1)
Author: Jackiefan | Project: tensorflow | Lines: 20 | Source: losses.py
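
The approximation claims in the docstring are easy to verify numerically; a quick NumPy check (illustrative, not from the original source):

import numpy as np

# Small x: log(cosh(x)) ~= x**2 / 2
print(np.log(np.cosh(0.01)), 0.01 ** 2 / 2)      # both ~5.0e-05
# Large x: log(cosh(x)) ~= abs(x) - log(2)
print(np.log(np.cosh(10.)), 10. - np.log(2.))    # both ~9.3069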


Example 3: _eager_metrics_fn

def _eager_metrics_fn(model, outputs, targets):
  """Calculates the metrics for each output of the given model.

  Arguments:
      model: The model on which metrics are being calculated.
      outputs: The outputs of the given model.
      targets: The predictions or targets of the given model.

  Returns:
      Returns the metric names and metric results for each output of the model.
  """
  metric_names = []
  metric_results = []
  if not isinstance(outputs, list):
    outputs = [outputs]

  if not isinstance(targets, list):
    targets = [targets]

  for i in range(len(model.outputs)):
    output_metrics = model.nested_metrics[i]
    for nested_output_metric in output_metrics:
      metric_name, metric_fn = _get_metrics_info(
          nested_output_metric, backend.int_shape(model.outputs[i]),
          model.loss_functions[i])

      if len(model.output_names) > 1:
        metric_name = model.output_names[i] + '_' + metric_name
        if metric_name not in model.metrics_names:
          model.metrics_names.append(metric_name)

      with backend.name_scope(metric_name):
        metric_result = metric_fn(outputs[i], targets[i])
        metric_names.append(metric_name)
        metric_results.append(backend.mean(metric_result))

  return metric_results
Author: kimr843 | Project: tensorflow | Lines: 37 | Source: training_eager.py


Example 4: _model_loss

def _model_loss(model, inputs, targets):
  """Calculates the loss for a given model.

  Arguments:
     model: The model on which metrics are being calculated.
     inputs: The inputs of the given model. This is typically the mini batch of
              data that is fed to the model.
     targets: The predictions or targets of the given model.

  Returns:
     Returns the model output, total loss and loss value calculated using the
     specified loss function. The total loss includes regularization losses and
     applies masking and sample weighting to the loss value.
  """
  total_loss = None  # set by the first output loss in the loop below
  if len(inputs) == 1:
    outs = model.call(inputs[0])
  else:
    outs = model.call(inputs)
  if not isinstance(outs, list):
    outs = [outs]

  if not isinstance(targets, list):
    targets = [targets]

  loss_metrics = []
  with K.name_scope('loss'):
    for i, loss_fn in enumerate(model.loss_functions):
      # compute the loss
      output_loss = _eager_loss_fn(outs[i], targets[i], loss_fn,
                                   model.output_names[i])
      loss_metrics.append(K.mean(output_loss))

      mask = outs[i]._keras_mask
      # adapted from weighted_loss_fn
      if mask is not None:
        # mask should have the same shape as output_loss
        output_loss *= mask
        #  the loss per batch should be proportional
        #  to the number of unmasked samples.
        output_loss /= K.mean(mask)

      # adapted from weighted_loss_fn
      # apply sample weighting
      if model.sample_weights:
        # reduce score_array to same ndim as weight array
        ndim = K.ndim(output_loss)
        weight_ndim = K.ndim(model.sample_weights)
        output_loss = K.mean(output_loss, axis=list(range(weight_ndim, ndim)))
        output_loss *= model.sample_weights
        output_loss /= K.mean(K.cast(K.not_equal(model.sample_weights, 0),
                                     K.floatx()))
        output_loss = K.mean(output_loss)

      loss_weight = model.loss_weights_list[i]
      if total_loss is None:
        total_loss = loss_weight * output_loss
      else:
        total_loss += loss_weight * output_loss

    total_loss = K.mean(total_loss)
    # Add regularization losses
    custom_losses = []
    for layer in model.layers:
      if layer.losses:
        custom_losses += layer.losses

    if custom_losses:
      total_loss += sum(custom_losses)

  return outs, total_loss, loss_metrics
Author: dananjayamahesh | Project: tensorflow | Lines: 71 | Source: training_eager.py


Example 5: top_k_categorical_accuracy

def top_k_categorical_accuracy(y_true, y_pred, k=5):
  return K.mean(K.in_top_k(y_pred, K.argmax(y_true, axis=-1), k), axis=-1)
Author: 1000sprites | Project: tensorflow | Lines: 2 | Source: metrics.py
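
A quick usage sketch with made-up predictions (K.in_top_k expects rank-2 predictions and integer class targets; again via the public tf.keras.backend alias):

import tensorflow as tf
from tensorflow.keras import backend as K

y_true = K.constant([[0., 0., 1.], [0., 1., 0.]])
y_pred = K.constant([[.1, .2, .7], [.6, .3, .1]])
# True classes 2 and 1 both fall inside their row's top-2 predictions.
acc = K.mean(K.in_top_k(y_pred, K.argmax(y_true, axis=-1), 2), axis=-1)
print(K.eval(acc))  # 1.0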


Example 6: call

  def call(self, inputs):
    if self.data_format == 'channels_last':
      return K.mean(inputs, axis=[1, 2, 3])
    else:
      return K.mean(inputs, axis=[2, 3, 4])
Author: ChengYuXiang | Project: tensorflow | Lines: 5 | Source: pooling.py


Example 7: squared_hinge

def squared_hinge(y_true, y_pred):
  return K.mean(
      math_ops.square(math_ops.maximum(1. - y_true * y_pred, 0.)), axis=-1)
Author: Jackiefan | Project: tensorflow | Lines: 3 | Source: losses.py


Example 8: mean_absolute_percentage_error

def mean_absolute_percentage_error(y_true, y_pred):
  diff = math_ops.abs(
      (y_true - y_pred) / K.clip(math_ops.abs(y_true), K.epsilon(), None))
  return 100. * K.mean(diff, axis=-1)
Author: Jackiefan | Project: tensorflow | Lines: 4 | Source: losses.py
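
The K.clip(..., K.epsilon(), None) guard keeps the division finite when y_true contains zeros; a quick check with hypothetical values (a sketch, not from the original source):

import tensorflow as tf
from tensorflow.keras import backend as K

y_true = K.constant([[0., 2.]])
y_pred = K.constant([[.5, 1.]])
diff = tf.abs((y_true - y_pred) / K.clip(tf.abs(y_true), K.epsilon(), None))
print(K.eval(100. * K.mean(diff, axis=-1)))  # very large, but finite: no inf/nan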


Example 9: mean_squared_error

def mean_squared_error(y_true, y_pred):
  return K.mean(math_ops.square(y_pred - y_true), axis=-1)
Author: Jackiefan | Project: tensorflow | Lines: 2 | Source: losses.py


Example 10: batch_test_loop

def batch_test_loop(model,
                    inputs,
                    targets,
                    batch_size,
                    sample_weights=None,
                    verbose=0):
  """Test function for eager execution when input is given as arrays or tensors.

  Arguments:
      model: Model instance that is being evaluated in Eager mode.
      inputs: List of input arrays.
      targets: List of target arrays.
      batch_size: Integer batch size.
      sample_weights: Optional list of sample weight arrays.
      verbose: Verbosity mode.

  Returns:
      Scalar loss (if the model has a single output and no metrics)
      or list of scalars (if the model has multiple outputs
      and/or metrics). The attribute `model.metrics_names` will give you
      the display labels for the scalar outputs.
  """
  outs = []
  feed_data = inputs + targets
  if sample_weights:
    feed_data += sample_weights
  num_samples = training_utils.check_num_samples(
      feed_data, batch_size=batch_size)
  if verbose == 1:
    progbar = generic_utils.Progbar(target=num_samples)
  batches = generic_utils.make_batches(num_samples, batch_size)
  index_array = np.arange(num_samples)
  for batch_index, (batch_start, batch_end) in enumerate(batches):
    batch_ids = index_array[batch_start:batch_end]
    inputs_batch = slice_arrays(inputs, batch_ids)
    targets_batch = slice_arrays(targets, batch_ids)
    if sample_weights:
      sample_weights_batch = slice_arrays(sample_weights, batch_ids)
    else:
      sample_weights_batch = None

    inputs_batch = [
        ops.convert_to_tensor(val, dtype=backend.floatx())
        for val in inputs_batch
    ]
    targets_batch = [
        ops.convert_to_tensor(val, dtype=backend.floatx())
        for val in targets_batch
    ]
    if sample_weights:
      sample_weights_batch = [
          ops.convert_to_tensor(val, dtype=backend.floatx())
          if val is not None else None for val in sample_weights_batch
      ]

    loss_outs, loss, loss_metrics = _model_loss(
        model,
        inputs_batch,
        targets_batch,
        sample_weights=sample_weights_batch,
        training=False)
    metrics_results = _eager_metrics_fn(model, loss_outs, targets_batch)
    batch_outs = []
    for _, v in zip(model.metrics_names,
                    [backend.mean(loss)] + loss_metrics + metrics_results):
      batch_outs.append(tensor_util.constant_value(v))

    if isinstance(batch_outs, list):
      if batch_index == 0:
        for _ in enumerate(batch_outs):
          outs.append(0.)
      for i, batch_out in enumerate(batch_outs):
        outs[i] += batch_out * len(batch_ids)
    else:
      if batch_index == 0:
        outs.append(0.)
      outs[0] += batch_outs * len(batch_ids)

    if verbose == 1:
      progbar.update(batch_end)

  for i in range(len(outs)):
    outs[i] /= num_samples
  if len(outs) == 1:
    return outs[0]
  return outs
Author: KiaraStarlab | Project: tensorflow | Lines: 86 | Source: training_eager.py


Example 11: iterator_test_loop

def iterator_test_loop(model, inputs, steps, verbose=0):
  """Test function for eager execution when input is given as dataset iterator.

  Arguments:
      model: Model instance that is being evaluated in Eager mode.
      inputs: Input dataset iterator.
      steps: Total number of steps (batches of samples) before declaring
      predictions finished.
      verbose: Verbosity mode.

  Returns:
      Scalar loss (if the model has a single output and no metrics)
      or list of scalars (if the model has multiple outputs
      and/or metrics). The attribute `model.metrics_names` will give you
      the display labels for the scalar outputs.

  Raises:
      ValueError: In case of mismatch between given number of inputs and
        expectations of the model.
  """
  assert isinstance(inputs, iterator_ops.EagerIterator)
  outs = []
  num_samples = 0
  if verbose == 1:
    progbar = generic_utils.Progbar(target=steps)
  for step_index in range(steps):
    # Get data from the iterator.
    try:
      next_element = inputs.get_next()
    except errors.OutOfRangeError:
      logging.warning(
          'Your dataset iterator ran out of data; interrupting testing. '
          'Make sure that your dataset can generate at least `steps` batches '
          '(in this case, %d batches).', steps)
      break

    if not isinstance(next_element, (list, tuple)) or len(next_element) != 2:
      raise ValueError('Please provide data as a list or tuple of 2 elements '
                       ' - input and target pair. Received %s' % next_element)
    x, y = next_element

    # Validate and standardize data.
    x, y, sample_weights = model._standardize_user_data(x, y)

    # Calculate model output, loss values.
    loss_outs, loss, loss_metrics = _model_loss(
        model, x, y, sample_weights=sample_weights, training=False)
    metrics_results = _eager_metrics_fn(model, loss_outs, y)
    batch_outs = []
    for _, v in zip(model.metrics_names,
                    [backend.mean(loss)] + loss_metrics + metrics_results):
      batch_outs.append(tensor_util.constant_value(v))

    # Get current step size.
    if isinstance(x, list):
      step_size = x[0].get_shape().as_list()[0]
    else:
      step_size = x.get_shape().as_list()[0]

    # Accumulate results in output array.
    if not isinstance(batch_outs, list):
      batch_outs = [batch_outs]
    if step_index == 0:
      for _ in enumerate(batch_outs):
        outs.append(0.)
    for i, batch_out in enumerate(batch_outs):
      outs[i] += batch_out * step_size

    # Calculate sample size.
    num_samples += step_size
    if verbose == 1:
      progbar.update(step_index + 1)

  # Average the accumulated totals over all samples once all steps are done.
  for i in range(len(outs)):
    outs[i] /= num_samples
  if len(outs) == 1:
    return outs[0]
  return outs
Author: KiaraStarlab | Project: tensorflow | Lines: 78 | Source: training_eager.py


Example 12: batch_fit_loop

def batch_fit_loop(model,
                   inputs,
                   targets,
                   epoch_logs,
                   index_array,
                   out_labels,
                   callback_model,
                   batch_size,
                   sample_weights=None,
                   val_inputs=None,
                   val_targets=None,
                   val_sample_weights=None,
                   callbacks=None,
                   shuffle=True,
                   num_train_samples=None,
                   do_validation=False):
  """Fit function for eager execution when input is given as arrays or tensors.

  Updates the given epoch logs.

  Arguments:
      model: Instance of the `Model`.
      inputs: List of input arrays.
      targets: List of target arrays.
      epoch_logs: Dictionary of logs from every epoch.
      index_array: Index array generated from number of training samples.
      out_labels: Output labels generated from model metric names.
      callback_model: Instance of `Model` to callback.
      batch_size: Integer batch size or None if unknown.
      sample_weights: Optional list of sample weight arrays.
      val_inputs: Input data for validation.
      val_targets: Target data for validation.
      val_sample_weights: Sample weight data for validation.
      callbacks: List of callbacks to be called during training.
      shuffle: Whether to shuffle the data at the beginning of each epoch.
      num_train_samples: Integer number of training samples.
      do_validation: Boolean value indicating whether we should do validation.
  """
  # TODO(psv): Create a dataset iterator instead of manually creating batches
  # here and in batch_test_loop, batch_predict_loop.
  if shuffle == 'batch':
    index_array = model._batch_shuffle(index_array, batch_size)
  elif shuffle:
    np.random.shuffle(index_array)

  batches = generic_utils.make_batches(num_train_samples, batch_size)

  for batch_index, (batch_start, batch_end) in enumerate(batches):
    batch_ids = index_array[batch_start:batch_end]
    inputs_batch = slice_arrays(inputs, batch_ids, contiguous=not shuffle)
    targets_batch = slice_arrays(targets, batch_ids, contiguous=not shuffle)
    if sample_weights:
      sample_weights_batch = slice_arrays(
          sample_weights, batch_ids, contiguous=not shuffle)
    else:
      sample_weights_batch = None
    batch_logs = {}
    batch_logs['batch'] = batch_index
    batch_logs['size'] = len(batch_ids)

    callbacks.on_batch_begin(batch_index, batch_logs)

    inputs_batch = [
        ops.convert_to_tensor(val, dtype=backend.floatx())
        for val in inputs_batch
    ]
    targets_batch = [
        ops.convert_to_tensor(val, dtype=backend.floatx())
        for val in targets_batch
    ]
    if sample_weights:
      sample_weights_batch = [
          ops.convert_to_tensor(val, dtype=backend.floatx())
          if val is not None else None for val in sample_weights_batch
      ]

    outs, loss, loss_metrics = _process_single_batch(
        model,
        inputs_batch,
        targets_batch,
        sample_weights=sample_weights_batch,
        training=True)

    if not isinstance(outs, list):
      outs = [outs]

    for l, o in zip(out_labels, outs):
      batch_logs[l] = o
    # Required for eager execution
    metrics_results = _eager_metrics_fn(model, outs, targets_batch)
    batch_logs['loss'] = tensor_util.constant_value(backend.mean(loss))

    for k, v in zip(model.metrics_names,
                    [backend.mean(loss)] + loss_metrics + metrics_results):
      batch_logs[k] = tensor_util.constant_value(v)
    callbacks.on_batch_end(batch_index, batch_logs)
    if callback_model.stop_training:
      break

    if batch_index == len(batches) - 1:  # Last batch.
#......... (part of the code omitted) .........
Author: KiaraStarlab | Project: tensorflow | Lines: 101 | Source: training_eager.py


Example 13: iterator_fit_loop


#......... (part of the code omitted) .........
      val_inputs: Input data for validation.
      val_targets: Target data for validation.
      val_sample_weights: Sample weight data for validation.
      epochs: Number of times to iterate over the data
      verbose: Verbosity mode, 0, 1 or 2
      callbacks: List of callbacks to be called during training
      callback_metrics: List of strings, the display names of the metrics
          passed to the callbacks. They should be the
          concatenation of list the display names of the outputs of
           `f` and the list of display names of the outputs of `f_val`.
      validation_steps: Number of steps to run validation for (only if doing
        validation from data tensors). Ignored with default value of `None`.
      do_validation: Boolean value indicating whether we should do validation.

  Raises:
      ValueError: In case of mismatch between given number of inputs and
        expectations of the model.
  """
  assert isinstance(inputs, iterator_ops.EagerIterator)
  for step_index in range(steps_per_epoch):
    batch_logs = {}
    batch_logs['batch'] = step_index
    batch_logs['size'] = 1
    callbacks.on_batch_begin(step_index, batch_logs)

    # Get data from the iterator.
    try:
      next_element = inputs.get_next()
    except errors.OutOfRangeError:
      logging.warning(
          'Your dataset iterator ran out of data; '
          'interrupting training. Make sure that your dataset'
          ' can generate at least `steps_per_epoch * epochs` '
          'batches (in this case, %d batches).' % (steps_per_epoch * epochs))
      break

    if not isinstance(next_element, (list, tuple)) or len(next_element) != 2:
      raise ValueError('Please provide data as a list or tuple of 2 elements '
                       ' - input and target pair. Received %s' % next_element)
    x, y = next_element

    # Validate and standardize data.
    x, y, sample_weights = model._standardize_user_data(
        x, y, class_weight=class_weight)
    if sample_weights:
      sample_weights = [
          ops.convert_to_tensor(val, dtype=backend.floatx())
          if val is not None else None for val in sample_weights
      ]

    if step_index == 0 and not callback_metrics:
      out_labels = model.metrics_names
      if do_validation:
        callback_metrics = copy.copy(out_labels) + [
            'val_' + n for n in out_labels
        ]
      else:
        callback_metrics = copy.copy(out_labels)
      callbacks.set_params({
          'epochs': epochs,
          'steps': steps_per_epoch,
          'verbose': verbose,
          'do_validation': do_validation,
          'metrics': callback_metrics or [],
      })

    # Train model.
    outs, loss, loss_metrics = _process_single_batch(
        model, x, y, sample_weights=sample_weights, training=True)
    if not isinstance(outs, list):
      outs = [outs]

    # Calculate metrics.
    for l, o in zip(out_labels, outs):
      batch_logs[l] = o
    # Required for eager execution
    metrics_results = _eager_metrics_fn(model, outs, y)
    batch_logs['loss'] = tensor_util.constant_value(backend.mean(loss))

    for k, v in zip(model.metrics_names,
                    [backend.mean(loss)] + loss_metrics + metrics_results):
      batch_logs[k] = tensor_util.constant_value(v)
    callbacks.on_batch_end(step_index, batch_logs)
    if callback_model.stop_training:
      break

    if step_index == steps_per_epoch - 1:
      if do_validation:
        val_outs = test_loop(
            model,
            val_inputs,
            val_targets,
            sample_weights=val_sample_weights,
            steps=validation_steps,
            verbose=0)
        if not isinstance(val_outs, list):
          val_outs = [val_outs]
        # Same labels assumed.
        for l, o in zip(out_labels, val_outs):
          epoch_logs['val_' + l] = o
Author: KiaraStarlab | Project: tensorflow | Lines: 101 | Source: training_eager.py


Example 14: test_loop

def test_loop(model, ins, batch_size=None, verbose=0, steps=None):
  """Abstract method to loop over some data in batches.

  Arguments:
      model: Model instance that is being evaluated in Eager mode.
      ins: list of tensors to be fed to `f`.
      batch_size: integer batch size or `None`.
      verbose: verbosity mode.
      steps: Total number of steps (batches of samples)
          before declaring predictions finished.
          Ignored with the default value of `None`.

  Returns:
      Scalar loss (if the model has a single output and no metrics)
      or list of scalars (if the model has multiple outputs
      and/or metrics). The attribute `model.metrics_names` will give you
      the display labels for the scalar outputs.
  """
  K.set_learning_phase(False)
  num_samples = model._check_num_samples(ins, batch_size, steps, 'steps')
  outs = []
  if verbose == 1:
    progbar = Progbar(target=num_samples)
  batches = make_batches(num_samples, batch_size)
  index_array = np.arange(num_samples)
  for batch_index, (batch_start, batch_end) in enumerate(batches):
    batch_ids = index_array[batch_start:batch_end]
    if isinstance(ins[-1], float):
      # Do not slice the training phase flag.
      ins_batch = slice_arrays(ins[:-1], batch_ids) + [ins[-1]]
    else:
      ins_batch = slice_arrays(ins, batch_ids)

    ins_batch_converted = []
    for ib in ins_batch:
      ins_batch_converted.append(ops.convert_to_tensor(ib, dtype=K.floatx()))

    eager_model_inputs = []
    eager_model_outputs = []
    for i in range(len(model.inputs)):
      eager_model_inputs.append(ins_batch_converted[i])

    for i in range(len(model.inputs), len(ins_batch_converted)):
      eager_model_outputs.append(ins_batch_converted[i])

    loss_outs, loss, loss_metrics = _model_loss(model, eager_model_inputs,
                                                eager_model_outputs)
    _, metrics_results = _eager_metrics_fn(model, loss_outs,
                                           eager_model_outputs)
    batch_outs = []
    for _, v in zip(model.metrics_names,
                    [K.mean(loss)] + loss_metrics + metrics_results):
      batch_outs.append(tensor_util.constant_value(v))

    if isinstance(batch_outs, list):
      if batch_index == 0:
        for _ in enumerate(batch_outs):
          outs.append(0.)
      for i, batch_out in enumerate(batch_outs):
        outs[i] += batch_out * len(batch_ids)
    else:
      if batch_index == 0:
        outs.append(0.)
      outs[0] += batch_outs * len(batch_ids)

    if verbose == 1:
      progbar.update(batch_end)
  for i in range(len(outs)):
    outs[i] /= num_samples
  if len(outs) == 1:
    return outs[0]
  return outs
Author: dananjayamahesh | Project: tensorflow | Lines: 72 | Source: training_eager.py


Example 15: fit_loop


#......... (part of the code omitted) .........
  callbacks.on_train_begin()
  callback_model.stop_training = False
  for cbk in callbacks:
    cbk.validation_data = val_ins

  for epoch in range(initial_epoch, epochs):
    callbacks.on_epoch_begin(epoch)
    epoch_logs = {}
    if shuffle == 'batch':
      index_array = model._batch_shuffle(index_array, batch_size)
    elif shuffle:
      np.random.shuffle(index_array)

    batches = make_batches(num_train_samples, batch_size)

    for batch_index, (batch_start, batch_end) in enumerate(batches):
      batch_ids = index_array[batch_start:batch_end]
      try:
        if isinstance(ins[-1], float):
          # Do not slice the training phase flag.
          ins_batch = slice_arrays(ins[:-1], batch_ids) + [ins[-1]]
        else:
          ins_batch = slice_arrays(ins, batch_ids)
      except TypeError:
        raise TypeError('TypeError while preparing batch. '
                        'If using HDF5 input data, '
                        'pass shuffle="batch".')
      batch_logs = {}
      batch_logs['batch'] = batch_index
      batch_logs['size'] = len(batch_ids)

      callbacks.on_batch_begin(batch_index, batch_logs)

      ins_batch_converted = []
      for ib in ins_batch:
        ins_batch_converted.append(ops.convert_to_tensor(ib, dtype=K.floatx()))
      eager_model_inputs = []
      eager_model_outputs = []
      for i in range(len(model.inputs)):
        eager_model_inputs.append(ins_batch_converted[i])

      for i in range(len(model.inputs), len(ins_batch_converted)):
        eager_model_outputs.append(ins_batch_converted[i])

      outs, loss, loss_metrics = _process_single_batch(eager_model_inputs,
                                                       eager_model_outputs,
                                                       model)

      if not isinstance(outs, list):
        outs = [outs]

      for l, o in zip(out_labels, outs):
        batch_logs[l] = o
      # Required for Eager mode
      metrics_names, metrics_results = _eager_metrics_fn(model, outs,
                                                         eager_model_outputs)
      batch_logs['loss'] = tensor_util.constant_value(K.mean(loss))

      # TODO(anjalisridhar): Move this to compile to avoid duplicate code.
      # In graph mode we set the metric names in compile. However in
      # Eager mode we calculate the metrics for each batch in fit_loop.
      # We could calculate the metric names and functions in compile.
      # This would avoid setting the callback parameters separately.
      # We need to do this for the first iteration alone
      for m in metrics_names:
        if m not in callback_metrics:
          callback_metrics.append(m)

      callbacks.set_params({
          'batch_size': batch_size,
          'epochs': epochs,
          'steps': steps_per_epoch,
          'samples': num_train_samples,
          'verbose': verbose,
          'do_validation': do_validation,
          'metrics': callback_metrics or [],
      })

      for k, v in zip(model.metrics_names,
                      [K.mean(loss)] + loss_metrics + metrics_results):
        batch_logs[k] = tensor_util.constant_value(v)

      callbacks.on_batch_end(batch_index, batch_logs)
      if callback_model.stop_training:
        break

      if batch_index == len(batches) - 1:  # Last batch.
        if do_validation:
          val_outs = test_loop(
              model, val_ins, batch_size=batch_size, verbose=0)
          if not isinstance(val_outs, list):
            val_outs = [val_outs]
          # Same labels assumed.
          for l, o in zip(out_labels, val_outs):
            epoch_logs['val_' + l] = o
    callbacks.on_epoch_end(epoch, epoch_logs)
    if callback_model.stop_training:
      break
  callbacks.on_train_end()
  return model.history
Author: dananjayamahesh | Project: tensorflow | Lines: 101 | Source: training_eager.py


Example 16: binary_crossentropy

def binary_crossentropy(y_true, y_pred):
  return K.mean(K.binary_crossentropy(y_true, y_pred), axis=-1)
Author: Jackiefan | Project: tensorflow | Lines: 2 | Source: losses.py


Example 17: poisson

def poisson(y_true, y_pred):
  return K.mean(y_pred - y_true * math_ops.log(y_pred + K.epsilon()), axis=-1)
Author: Jackiefan | Project: tensorflow | Lines: 2 | Source: losses.py


Example 18: logcosh

def logcosh(y_true, y_pred):

  def _logcosh(x):
    return x + K.softplus(-2. * x) - K.log(2.)

  return K.mean(_logcosh(y_pred - y_true), axis=-1)
Author: AbhinavJain13 | Project: tensorflow | Lines: 6 | Source: losses.py


Example 19: mean_absolute_error

def mean_absolute_error(y_true, y_pred):
  return K.mean(math_ops.abs(y_pred - y_true), axis=-1)
开发者ID:Jackiefan,项目名称:tensorflow,代码行数:2,代码来源:losses.py


Example 20: mean_absolute_percentage_error

def mean_absolute_percentage_error(y_true, y_pred):
  # Percentage counterpart of MAE: each error is scaled by the magnitude of y_true.
  diff = K.abs((y_true - y_pred) / K.clip(K.abs(y_true), K.epsilon(), None))
  return 100. * K.mean(diff, axis=-1)
Author: Kongsea | Project: tensorflow | Lines: 4 | Source: losses.py



Note: The tensorflow.python.keras._impl.keras.backend.mean examples in this article were compiled by 纯净天空 from open-source projects hosted on GitHub, MSDocs, and similar source-code platforms. The snippets come from projects contributed by their original authors, who retain copyright; consult each project's license before using or redistributing the code, and do not repost without permission.

