
Python logging_ops.scalar_summary Function Code Examples


This article collects typical usage examples of the Python function tensorflow.python.ops.logging_ops.scalar_summary. If you have been wondering what scalar_summary does, how to call it, or what real-world usage looks like, the curated code examples below should help.



The following presents 20 code examples of the scalar_summary function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
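
Before diving in, a quick orientation: scalar_summary takes a tag (or a list of tags) and a matching real-valued tensor, and returns a string tensor holding a serialized Summary protobuf that a summary writer can consume. Below is a minimal standalone sketch, assuming a contrib-era TensorFlow 1.x build where this private module is still importable (in later releases, tf.summary.scalar is the public replacement; the log directory is illustrative):

# Minimal sketch of calling scalar_summary directly (TF 1.x assumption).
import tensorflow as tf
from tensorflow.python.ops import logging_ops

loss = tf.constant(0.25)
# Returns a string tensor holding a serialized Summary protobuf.
summary_op = logging_ops.scalar_summary("loss", loss)

with tf.Session() as sess:
    writer = tf.summary.FileWriter("/tmp/scalar_summary_demo", sess.graph)
    writer.add_summary(sess.run(summary_op), global_step=0)
    writer.close()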

Example 1: _get_train_ops

  def _get_train_ops(self, features, targets):
    """See base class."""
    global_step = contrib_variables.get_global_step()
    assert global_step

    features = self._get_feature_dict(features)
    logits = self._logits(features, is_training=True)
    if self._enable_centered_bias:
      centered_bias_step = [self._centered_bias_step(targets, features)]
    else:
      centered_bias_step = []
    with ops.control_dependencies(centered_bias_step):
      training_loss = self._target_column.training_loss(logits, targets,
                                                        features)
      weighted_average_loss = self._target_column.loss(logits, targets,
                                                       features)

    logging_ops.scalar_summary("loss", weighted_average_loss)

    linear_train_step = self._linear_model.get_train_step(training_loss)
    dnn_train_step = (self._dnn_model.get_train_step(training_loss) if
                      self._dnn_model else [])

    with ops.control_dependencies(linear_train_step + dnn_train_step):
      with ops.get_default_graph().colocate_with(global_step):
        return state_ops.assign_add(global_step, 1).op, weighted_average_loss
Developer: MrRabbit0o0 | Project: tensorflow | Lines: 26 | Source: dnn_linear_combined.py


Example 2: _get_batch

def _get_batch(per_class_queues, probs, batch_size):
  """Generates batches according to per-class-probabilities."""
  num_classes = probs.size
  # Number of examples per class is governed by a multinomial distribution.
  # Note: multinomial takes unnormalized log probabilities for its first
  # argument, of dimension [batch_size, num_classes].
  examples = random_ops.multinomial(
      np.expand_dims(np.log(probs), 0), batch_size)

  # Prepare the data and label batches.
  val_list = []
  label_list = []
  for i in range(num_classes):
    num_examples = math_ops.reduce_sum(
        math_ops.cast(math_ops.equal(examples, i), dtypes.int32))
    val_list.append(per_class_queues[i].dequeue_many(num_examples))
    label_list.append(array_ops.ones([num_examples], dtype=dtypes.int32) * i)

  # Create a tensor of labels.
  batch_labels = array_ops.concat(0, label_list)
  batch_labels.set_shape([batch_size])

  # Debug instrumentation.
  sample_tags = ['stratified_sample/samples_class%i' % i for i in
                 range(num_classes)]
  logging_ops.scalar_summary(sample_tags, math_ops.reduce_sum(
      array_ops.one_hot(batch_labels, num_classes), 0))

  return array_ops.concat(0, val_list), batch_labels
Developer: Brandon-Tai | Project: tensorflow | Lines: 29 | Source: sampling_ops.py
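
As the comment in this example notes, random_ops.multinomial expects unnormalized log-probabilities of shape [batch_size, num_classes]. A small standalone sketch of the same call through the public TF 1.x alias tf.multinomial (the probabilities and sample count are illustrative):

# Draw 10 class indices according to per-class probabilities [0.2, 0.8].
import numpy as np
import tensorflow as tf

probs = np.array([0.2, 0.8])
# Shape [1, 2]: one batch row of unnormalized log-probabilities.
samples = tf.multinomial(np.expand_dims(np.log(probs), 0), num_samples=10)

with tf.Session() as sess:
    print(sess.run(samples))  # e.g. [[1 1 0 1 1 1 0 1 1 1]]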


Example 3: _make_per_class_queues

def _make_per_class_queues(tensor_list, labels, num_classes, queue_capacity, threads_per_queue):
    """Creates per-class-queues based on data and labels."""
    # Create one queue per class.
    queues = []
    data_shapes = []
    data_dtypes = []
    for data_tensor in tensor_list:
        per_data_shape = data_tensor.get_shape().with_rank_at_least(1)[1:]
        per_data_shape.assert_is_fully_defined()
        data_shapes.append(per_data_shape)
        data_dtypes.append(data_tensor.dtype)

    for i in range(num_classes):
        q = data_flow_ops.FIFOQueue(
            capacity=queue_capacity, shapes=data_shapes, dtypes=data_dtypes, name="stratified_sample_class%d_queue" % i
        )
        logging_ops.scalar_summary("queue/%s/stratified_sample_class%d" % (q.name, i), q.size())
        queues.append(q)

    # Partition tensors according to labels. `partitions` is a list of lists, of
    # size num_classes X len(tensor_list). The number of tensors in partition `i`
    # should be the same for all tensors.
    all_partitions = [data_flow_ops.dynamic_partition(data, labels, num_classes) for data in tensor_list]
    partitions = [[cur_partition[i] for cur_partition in all_partitions] for i in range(num_classes)]

    # Enqueue each tensor on the per-class-queue.
    for i in range(num_classes):
        enqueue_op = (queues[i].enqueue_many(partitions[i]),)
        queue_runner.add_queue_runner(queue_runner.QueueRunner(queues[i], [enqueue_op] * threads_per_queue))

    return queues
Developer: pronobis | Project: tensorflow | Lines: 31 | Source: sampling_ops.py


Example 4: testMergeSummary

  def testMergeSummary(self):
    with self.cached_session():
      c = constant_op.constant(3)
      a = logging_ops.scalar_summary('a', c)
      b = logging_ops.scalar_summary('b', c)
      s = logging_ops.merge_summary([a, b])
      self.assertEqual(s.op.type, u'MergeSummary')
Developer: Ajaycs99 | Project: tensorflow | Lines: 7 | Source: summaries_test.py


Example 5: _get_train_ops

  def _get_train_ops(self, features, targets):
    """See base class."""
    self._validate_linear_feature_columns(features)
    if not isinstance(self._linear_optimizer, sdca_optimizer.SDCAOptimizer):
      return super(LinearClassifier, self)._get_train_ops(features, targets)

    # SDCA currently supports binary classification only.
    if self._target_column.num_label_columns > 2:
      raise ValueError(
          "SDCA does not currently support multi-class classification.")
    global_step = contrib_variables.get_global_step()
    assert global_step

    logits, columns_to_variables, _ = layers.weighted_sum_from_feature_columns(
        columns_to_tensors=features,
        feature_columns=self._linear_feature_columns,
        num_outputs=self._target_column.num_label_columns,
        weight_collections=[self._linear_weight_collection],
        name="linear")
    with ops.control_dependencies([self._centered_bias()]):
      loss = self._loss(logits, targets, features)
    logging_ops.scalar_summary("loss", loss)

    train_ops = self._linear_optimizer.get_train_step(
        self._linear_feature_columns, self._target_column.weight_column_name,
        "logistic_loss", features, targets, columns_to_variables, global_step)

    return train_ops, loss
Developer: 363158858 | Project: tensorflow | Lines: 28 | Source: linear.py


Example 6: batch

def batch(tensor_list, batch_size, num_threads=1, capacity=32,
          enqueue_many=False, shapes=None, shared_name=None, name=None):
  """Creates batches of tensors in `tensor_list`.

  This function is implemented using a queue. A `QueueRunner` for the
  queue is added to the current `Graph`'s `QUEUE_RUNNERS` collection.

  If `enqueue_many` is `False`, `tensor_list` is assumed to represent a
  single example.  An input tensor with shape `[x, y, z]` will be output
  as a tensor with shape `[batch_size, x, y, z]`.

  If `enqueue_many` is `True`, `tensor_list` is assumed to represent a
  batch of examples, where the first dimension is indexed by example,
  and all members of `tensor_list` should have the same size in the
  first dimension.  If an input tensor has shape `[*, x, y, z]`, the
  output will have shape `[batch_size, x, y, z]`.  The `capacity` argument
  controls how long the prefetching is allowed to grow the queues.

  The returned operation is a dequeue operation and will throw
  `tf.errors.OutOfRangeError` if the input queue is exhausted. If this
  operation is feeding another input queue, its queue runner will catch
  this exception; however, if this operation is used in your main thread,
  you are responsible for catching it yourself.

  *N.B.:* You must ensure that either (i) the `shapes` argument is
  passed, or (ii) all of the tensors in `tensor_list` must have
  fully-defined shapes. `ValueError` will be raised if neither of
  these conditions holds.

  Args:
    tensor_list: The list of tensors to enqueue.
    batch_size: The new batch size pulled from the queue.
    num_threads: The number of threads enqueuing `tensor_list`.
    capacity: An integer. The maximum number of elements in the queue.
    enqueue_many: Whether each tensor in `tensor_list` is a single example.
    shapes: (Optional) The shapes for each example.  Defaults to the
      inferred shapes for `tensor_list`.
    shared_name: (optional). If set, this queue will be shared under the given
      name across multiple sessions.
    name: (Optional) A name for the operations.

  Returns:
    A list of tensors with the same number and types as `tensor_list`.

  Raises:
    ValueError: If the `shapes` are not specified, and cannot be
      inferred from the elements of `tensor_list`.
  """
  with ops.op_scope(tensor_list, name, "batch") as name:
    tensor_list = _validate(tensor_list)
    types = _dtypes([tensor_list])
    shapes = _shapes([tensor_list], shapes, enqueue_many)
    # TODO(josh11b,mrry): Switch to BatchQueue once it is written.
    queue = data_flow_ops.FIFOQueue(
        capacity=capacity, dtypes=types, shapes=shapes, shared_name=shared_name)
    _enqueue(queue, tensor_list, num_threads, enqueue_many)
    logging_ops.scalar_summary(
        "queue/%s/fraction_of_%d_full" % (queue.name, capacity),
        math_ops.cast(queue.size(), dtypes.float32) * (1. / capacity))
    return queue.dequeue_many(batch_size, name=name)
Developer: 13683116633 | Project: tensorflow | Lines: 60 | Source: input.py
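
For context, this helper sits behind the public tf.train.batch API in TF 1.x. A hedged usage sketch (the example shapes and capacity are illustrative, not taken from the source):

import tensorflow as tf

image = tf.random_uniform([28, 28, 1])  # one example of shape [x, y, z]
label = tf.constant(1)
# Output shapes become [batch_size, x, y, z]; a QueueRunner is registered
# in the graph's QUEUE_RUNNERS collection.
images, labels = tf.train.batch([image, label], batch_size=32,
                                num_threads=4, capacity=256)

with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    img_batch, lbl_batch = sess.run([images, labels])  # (32, 28, 28, 1), (32,)
    coord.request_stop()
    coord.join(threads)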


Example 7: _model_fn

  def _model_fn(features, labels, mode):
    """Model function."""
    assert labels is None, labels
    (all_scores, model_predictions, losses,
     training_op) = clustering_ops.KMeans(
         self._parse_tensor_or_dict(features),
         self._num_clusters,
         self._training_initial_clusters,
         self._distance_metric,
         self._use_mini_batch,
         random_seed=self._random_seed,
         kmeans_plus_plus_num_retries=self.
         _kmeans_plus_plus_num_retries).training_graph()
    incr_step = state_ops.assign_add(variables.get_global_step(), 1)
    loss = math_ops.reduce_sum(losses, name=KMeansClustering.LOSS_OP_NAME)
    logging_ops.scalar_summary('loss/raw', loss)
    training_op = with_dependencies([training_op, incr_step], loss)
    predictions = {
        KMeansClustering.ALL_SCORES: all_scores[0],
        KMeansClustering.CLUSTER_IDX: model_predictions[0],
    }
    eval_metric_ops = {KMeansClustering.SCORES: loss,}
    if self._relative_tolerance is not None:
      training_hooks = [self.LossRelativeChangeHook(self._relative_tolerance)]
    else:
      training_hooks = None
    return ModelFnOps(
        mode=mode,
        predictions=predictions,
        eval_metric_ops=eval_metric_ops,
        loss=loss,
        train_op=training_op,
        training_hooks=training_hooks)
Developer: cancan101 | Project: tensorflow | Lines: 33 | Source: kmeans.py


Example 8: _get_train_ops

    def _get_train_ops(self, features, targets):
        """See base class."""
        global_step = contrib_variables.get_global_step()
        assert global_step
        logits = self._logits(features, is_training=True)
        if self._enable_centered_bias:
            centered_bias_step = [self._centered_bias_step(targets, features)]
        else:
            centered_bias_step = []
        with ops.control_dependencies(centered_bias_step):
            loss = self._loss(logits, targets, features)
        logging_ops.scalar_summary("loss", loss)

        linear_vars = self._get_linear_vars()
        dnn_vars = self._get_dnn_vars()
        grads = gradients.gradients(loss, dnn_vars + linear_vars)
        if self._gradient_clip_norm:
            grads, _ = clip_ops.clip_by_global_norm(grads, self._gradient_clip_norm)

        dnn_grads = grads[0 : len(dnn_vars)]
        linear_grads = grads[len(dnn_vars) :]

        train_ops = self._get_linear_training_ops(linear_grads, linear_vars) + self._get_dnn_training_ops(
            dnn_grads, dnn_vars
        )

        train_step = control_flow_ops.group(*train_ops, name="combined_training_op")
        with ops.control_dependencies([train_step]):
            with ops.get_default_graph().colocate_with(global_step):
                return state_ops.assign_add(global_step, 1).op, loss
Developer: 285219011 | Project: liuwenfeng | Lines: 30 | Source: dnn_linear_combined.py


Example 9: _get_train_ops

  def _get_train_ops(self, features, targets):
    """See base class."""
    if not isinstance(self._linear_optimizer, sdca_optimizer.SDCAOptimizer):
      return super(LinearRegressor, self)._get_train_ops(features, targets)
    assert not self._joint_weights, ("_joint_weights is incompatible with"
                                     " SDCAOptimizer.")
    global_step = contrib_variables.get_or_create_global_step()

    logits, columns_to_variables, bias = (
        layers.weighted_sum_from_feature_columns(
            columns_to_tensors=features,
            feature_columns=self._linear_feature_columns,
            num_outputs=self._target_column.num_label_columns,
            weight_collections=[self._linear_model.get_scope_name()],
            scope=self._linear_model.get_scope_name()))
    with ops.control_dependencies([self._centered_bias()]):
      loss = self._target_column.loss(logits, targets, features)
      logging_ops.scalar_summary("loss", loss)

      _add_bias_column(self._linear_feature_columns, features, bias, targets,
                       columns_to_variables)

    train_op = self._linear_optimizer.get_train_step(
        columns_to_variables, self._target_column.weight_column_name,
        self._loss_type(), features, targets, global_step)
    return train_op, loss
Developer: KalraA | Project: tensorflow | Lines: 26 | Source: linear.py


Example 10: gradient_clipping

  def gradient_clipping(grads_and_vars):
    """Internal function for adaptive clipping."""
    grads, variables = zip(*grads_and_vars)

    norm = clip_ops.global_norm(grads)

    max_norm, log_mean = _adaptive_max_norm(
        norm, std_factor, decay, global_step, epsilon, name)

    # reports the max gradient norm for debugging
    if report_summary:
      logging_ops.scalar_summary(
          "global_norm/adaptive_max_gradient_norm", max_norm)

    # factor will be 1. if norm is smaller than max_norm
    factor = math_ops.select(norm < max_norm,
                             array_ops.ones_like(norm),
                             math_ops.exp(log_mean) / norm)

    if static_max_norm is not None:
      factor = math_ops.minimum(static_max_norm / norm, factor)

    # apply factor
    clipped_grads = []
    for grad in grads:
      if grad is None:
        clipped_grads.append(None)
      elif isinstance(grad, ops.IndexedSlices):
        clipped_grads.append(ops.IndexedSlices(
            grad.values * factor, grad.indices, grad.dense_shape))
      else:
        clipped_grads.append(grad * factor)

    return list(zip(clipped_grads, variables))
Developer: caikehe | Project: tensorflow | Lines: 34 | Source: optimizers.py
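
To make the scaling arithmetic above concrete, here is the factor computation in plain Python with made-up numbers (purely illustrative; the real code operates on tensors and a running mean maintained by _adaptive_max_norm):

import math

norm = 10.0               # current global gradient norm
max_norm = 4.0            # adaptive cap returned by _adaptive_max_norm
log_mean = math.log(4.0)  # running mean of log(norm)

# factor is 1.0 when the norm is under the cap; otherwise it rescales the
# gradients so their global norm becomes exp(log_mean).
factor = 1.0 if norm < max_norm else math.exp(log_mean) / norm  # 0.4
clipped = [g * factor for g in [2.0, -6.0]]                     # [0.8, -2.4]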


Example 11: _make_per_class_queues

def _make_per_class_queues(data, labels, num_classes, queue_capacity,
                           threads_per_queue):
  """Creates per-class-queues based on data and labels."""
  # Create one queue per class.
  queues = []
  per_data_shape = data.get_shape().with_rank_at_least(1)[1:]
  per_data_shape.assert_is_fully_defined()

  for i in range(num_classes):
    q = data_flow_ops.FIFOQueue(capacity=queue_capacity,
                                shapes=per_data_shape, dtypes=[data.dtype],
                                name='stratified_sample_class%d_queue' % i)
    logging_ops.scalar_summary('queue/stratified_sample_class%d' % i, q.size())
    queues.append(q)

  # Partition tensors according to labels.
  partitions = data_flow_ops.dynamic_partition(data, labels, num_classes)

  # Enqueue each tensor on the per-class-queue.
  for i in range(num_classes):
    enqueue_op = queues[i].enqueue_many(partitions[i]),
    queue_runner.add_queue_runner(queue_runner.QueueRunner(
        queues[i], [enqueue_op] * threads_per_queue))

  return queues
Developer: Brandon-Tai | Project: tensorflow | Lines: 25 | Source: sampling_ops.py


Example 12: _centered_bias

def _centered_bias(num_label_columns):
  centered_bias = variables.Variable(
      array_ops.zeros([num_label_columns]),
      collections=[_CENTERED_BIAS, ops.GraphKeys.VARIABLES],
      name=_CENTERED_BIAS_WEIGHT)
  logging_ops.scalar_summary(
      ["centered_bias %d" % cb for cb in range(num_label_columns)],
      array_ops.reshape(centered_bias, [-1]))
  return centered_bias
Developer: MrCrumpets | Project: tensorflow | Lines: 9 | Source: dnn.py


Example 13: input_producer

def input_producer(input_tensor, element_shape=None, num_epochs=None,
                   shuffle=True, seed=None, capacity=32, shared_name=None,
                   summary_name=None, name=None):
  """Output the rows of `input_tensor` to a queue for an input pipeline.

  Args:
    input_tensor: A tensor with the rows to produce. Must be at least
      one-dimensional. Must either have a fully-defined shape, or
      `element_shape` must be defined.
    element_shape: (Optional.) A `TensorShape` representing the shape of a
      row of `input_tensor`, if it cannot be inferred.
    num_epochs: (Optional.) An integer. If specified `input_producer` produces
      each row of `input_tensor` `num_epochs` times before generating an
      `OutOfRange` error. If not specified, `input_producer` can cycle through
      the rows of `input_tensor` an unlimited number of times.
    shuffle: (Optional.) A boolean. If true, the rows are randomly shuffled
      within each epoch.
    seed: (Optional.) An integer. The seed to use if `shuffle` is true.
    capacity: (Optional.) The capacity of the queue to be used for buffering
      the input.
    shared_name: (Optional.) If set, this queue will be shared under the given
      name across multiple sessions.
    summary_name: (Optional.) If set, a scalar summary for the current queue
      size will be generated, using this name as part of the tag.
    name: (Optional.) A name for the queue.

  Returns:
    A queue with the output rows.  A `QueueRunner` for the queue is
    added to the current graph's `QUEUE_RUNNERS` collection.

  Raises:
    ValueError: If the shape of the input cannot be inferred from the arguments.
  """
  with ops.name_scope(name, "input_producer", [input_tensor]):
    input_tensor = ops.convert_to_tensor(input_tensor, name="input_tensor")
    element_shape = input_tensor.get_shape()[1:].merge_with(element_shape)
    if not element_shape.is_fully_defined():
      raise ValueError("Either `input_tensor` must have a fully defined shape "
                       "or `element_shape` must be specified")

    if shuffle:
      input_tensor = random_ops.random_shuffle(input_tensor, seed=seed)

    input_tensor = limit_epochs(input_tensor, num_epochs)

    q = data_flow_ops.FIFOQueue(capacity=capacity,
                                dtypes=[input_tensor.dtype.base_dtype],
                                shapes=[element_shape],
                                shared_name=shared_name, name=name)
    enq = q.enqueue_many([input_tensor])
    queue_runner.add_queue_runner(queue_runner.QueueRunner(q, [enq]))
    if summary_name is not None:
      logging_ops.scalar_summary("queue/%s/%s" % (q.name, summary_name),
                                 math_ops.cast(q.size(), dtypes.float32) *
                                 (1. / capacity))
    return q
Developer: marevol | Project: tensorflow | Lines: 57 | Source: input.py
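
The same function is exposed publicly as tf.train.input_producer in TF 1.x. A hedged usage sketch (the data values and summary_name are illustrative):

import tensorflow as tf

rows = tf.constant([[1., 2.], [3., 4.], [5., 6.]])
# Shuffles rows within each epoch and raises OutOfRangeError after 2 epochs.
q = tf.train.input_producer(rows, num_epochs=2, shuffle=True,
                            summary_name="fraction_of_32_full")
row = q.dequeue()  # one row per call, shape [2]
# Note: with num_epochs set, run tf.local_variables_initializer() before
# starting the queue runners.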


Example 14: _centered_bias

def _centered_bias(num_outputs):
  centered_bias = variables.Variable(
      array_ops.zeros([num_outputs]),
      collections=["centered_bias", ops.GraphKeys.VARIABLES],
      name="centered_bias_weight")
  logging_ops.scalar_summary(
      ["centered_bias_%d" % cb for cb in range(num_outputs)],
      array_ops.reshape(centered_bias, [-1]))
  return centered_bias
Developer: KalraA | Project: tensorflow | Lines: 9 | Source: linear.py


Example 15: _centered_bias

def _centered_bias(logits_dimension, weight_collection):
  """Creates and returns centered bias."""
  centered_bias = variables.Variable(
      array_ops.zeros([logits_dimension]),
      collections=[weight_collection, ops.GraphKeys.VARIABLES],
      name="centered_bias_weight")
  logging_ops.scalar_summary(
      ["centered_bias_%d" % cb for cb in range(logits_dimension)],
      array_ops.reshape(centered_bias, [-1]))
  return centered_bias
Developer: caikehe | Project: tensorflow | Lines: 10 | Source: head.py


Example 16: _centered_bias

  def _centered_bias(self):
    centered_bias = variables.Variable(
        array_ops.zeros([self._num_label_columns()]),
        collections=[self._centered_bias_weight_collection,
                     ops.GraphKeys.VARIABLES],
        name="centered_bias_weight")
    logging_ops.scalar_summary(
        ["centered_bias_%d" % cb for cb in range(self._num_label_columns())],
        array_ops.reshape(centered_bias, [-1]))
    return centered_bias
Developer: Ambier | Project: tensorflow | Lines: 10 | Source: dnn_linear_combined.py


Example 17: prefetch_queue

def prefetch_queue(tensors,
                   capacity=8,
                   shared_name=None,
                   name=None):
  """Creates a queue to prefetech tensors from `tensors`.

  A queue runner for enqueing tensors into the prefetch_queue is automatically
  added to the TF QueueRunners collection.

  Example:
  This is useful, for example, to pre-assemble input batches read with
  `tf.train.batch()` and enqueue the pre-assembled batches.  Ops that dequeue
  from the pre-assembled queue will not pay the cost of assembling the batch.

  images, labels = tf.train.batch([image, label], batch_size=32, num_threads=4)
  batch_queue = prefetch_queue([images, labels])
  images, labels = batch_queue.dequeue()
  logits = Net(images)
  loss = Loss(logits, labels)

  Args:
    tensors: A list or dictionary of `Tensors` to enqueue in the buffer.
    capacity: An integer. The maximum number of elements in the queue.
    shared_name: (optional). If set, this queue will be shared under the given
      name across multiple sessions.
    name: (Optional) A name for the operations.

  Returns:
    A queue from which you can dequeue tensors with the same type and shape
    as `tensors`.
  """
  if isinstance(tensors, dict):
    # Need to wrap the keys and values in list() since Python3 returns views.
    # We sort the keys so the order is consistent across runs.
    names = list(sorted(tensors.keys()))
    tensor_list = list([tensors[n] for n in names])
  else:
    names = None
    tensor_list = tensors

  with ops.name_scope(name, "prefetch_queue", tensor_list) as name:
    dtypes = [t.dtype for t in tensor_list]
    shapes = [t.get_shape() for t in tensor_list]
    queue = data_flow_ops.FIFOQueue(capacity=capacity,
                                    dtypes=dtypes,
                                    shapes=shapes,
                                    names=names,
                                    shared_name=shared_name)
    enqueue_op = queue.enqueue(tensors, name=name)
    queue_runner.add_queue_runner(
        queue_runner.QueueRunner(queue, [enqueue_op]))
    logging_ops.scalar_summary(
        "queue/%s/fraction_of_%d_full" % (queue.name, capacity),
        math_ops.to_float(queue.size()) * (1. / capacity))
    return queue
Developer: 821760408-sp | Project: tensorflow | Lines: 55 | Source: prefetch_queue.py


Example 18: create_estimator_spec

  def create_estimator_spec(
      self, features, mode, logits, labels=None, train_op_fn=None):
    """See `Head`."""
    with variable_scope.variable_scope(
        None,
        default_name='regression_head',
        values=(tuple(six.itervalues(features)) + (labels, logits))):

      # Predict.
      logits = _check_logits(logits, self._logits_dimension)
      predictions = {prediction_keys.PredictionKeys.PREDICTIONS: logits}
      if mode == model_fn.ModeKeys.PREDICT:
        return model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.PREDICT,
            predictions=predictions,
            export_outputs={'': export_output.RegressionOutput(value=logits)})

      # Eval.
      labels = _check_labels(_maybe_expand_dim(math_ops.to_float(labels)),
                             self._logits_dimension)
      unweighted_loss = losses.mean_squared_error(
          labels=labels, predictions=logits, reduction=losses.Reduction.NONE)
      weights = (
          1. if (self._weight_feature_key is None) else
          features[self._weight_feature_key])
      weights = _maybe_expand_dim(math_ops.to_float(weights, name='weights'))
      training_loss = losses.compute_weighted_loss(
          unweighted_loss, weights=weights, reduction=losses.Reduction.SUM)
      if mode == model_fn.ModeKeys.EVAL:
        # Estimator already adds a metric for loss.
        eval_metric_ops = {
            metric_keys.MetricKeys.LOSS_MEAN: metrics_lib.mean(
                unweighted_loss, weights=weights)
        }
        return model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.EVAL,
            predictions=predictions,
            loss=training_loss,
            eval_metric_ops=eval_metric_ops)

      # Train.
      if train_op_fn is None:
        raise ValueError('train_op_fn can not be None.')
      logging_ops.scalar_summary(metric_keys.MetricKeys.LOSS, training_loss)
      logging_ops.scalar_summary(
          metric_keys.MetricKeys.LOSS_MEAN,
          losses.compute_weighted_loss(
              unweighted_loss, weights=weights,
              reduction=losses.Reduction.MEAN))
      return model_fn.EstimatorSpec(
          mode=model_fn.ModeKeys.TRAIN,
          predictions=predictions,
          loss=training_loss,
          train_op=train_op_fn(training_loss))
Developer: astorfi | Project: tensorflow | Lines: 54 | Source: head.py


Example 19: _input_producer

def _input_producer(input_tensor, dtype, num_epochs, shuffle, seed, capacity,
                    shared_name, name, summary_name):
  if shuffle:
    input_tensor = random_ops.random_shuffle(input_tensor, seed=seed)
  input_tensor = limit_epochs(input_tensor, num_epochs)

  q = data_flow_ops.FIFOQueue(capacity=capacity, dtypes=[dtype], shapes=[[]],
                              shared_name=shared_name, name=name)
  enq = q.enqueue_many([input_tensor])
  queue_runner.add_queue_runner(queue_runner.QueueRunner(q, [enq]))
  logging_ops.scalar_summary("queue/%s/%s" % (q.name, summary_name),
                             math_ops.cast(q.size(), dtypes.float32) *
                             (1. / capacity))
  return q
Developer: BersaKAIN | Project: tensorflow | Lines: 14 | Source: input.py


Example 20: _conditional_batch

def _conditional_batch(tensors, accept_prob, batch_size, queue_threads=10):
  """Conditionally enqueue tensors based on accept_prob.

  Specifically, enqueue the element if accept_prob > rand_unif([0, 1]).

  Args:
      tensors: List of tensors to enqueue.
      accept_prob: Acceptance probability per example.
      batch_size: Size of batch.
      queue_threads: Number of threads enqueuing in the final queue.

  Returns:
      List of batched tensors.

  Raises:
      ValueError: `accept_prob` isn't 0D.
  """
  accept_prob.get_shape().assert_has_rank(0)
  # Determine shapes and types of to-be-enqueued-tensors.
  shapes_list = []
  dtypes_list = []
  for tensor in tensors:
    cur_shape = tensor.get_shape()
    cur_shape.assert_is_fully_defined()
    shapes_list.append(cur_shape)
    dtypes_list.append(tensor.dtype)

  final_q = data_flow_ops.FIFOQueue(capacity=batch_size,
                                    shapes=shapes_list,
                                    dtypes=dtypes_list,
                                    name='batched_queue')
  logging_ops.scalar_summary('queue/%s/size' % final_q.name, final_q.size())

  # Conditionally enqueue.
  # Reshape enqueue op to match no_op's shape.
  eq_tf = math_ops.less(random_ops.random_uniform([]), accept_prob)
  conditional_enqueue = control_flow_ops.cond(
      eq_tf,
      lambda: final_q.enqueue(tensors),
      control_flow_ops.no_op)
  queue_runner.add_queue_runner(queue_runner.QueueRunner(
      final_q, [conditional_enqueue] * queue_threads))

  out_tensor = final_q.dequeue_many(batch_size)
  # Queues return a single tensor if the list of enqueued tensors is one. Since we
  # want the type to be the same in all cases, always return a list.
  if isinstance(out_tensor, ops.Tensor):
    out_tensor = [out_tensor]

  return out_tensor
Developer: govindap | Project: tensorflow | Lines: 50 | Source: sampling_ops.py



Note: The tensorflow.python.ops.logging_ops.scalar_summary examples above were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by various developers; copyright of the source code belongs to the original authors, and distribution and use are subject to each project's license. Please do not reproduce without permission.

