
Python control_flow_ops.no_op Function Code Examples


This article collects typical usage examples of the Python function tensorflow.python.ops.control_flow_ops.no_op. If you have been wondering what exactly no_op does, how to call it, or what real-world no_op code looks like, the hand-picked examples below should help.



A total of 20 code examples of the no_op function are shown below, sorted by popularity by default.
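
Before the examples, here is a minimal, self-contained sketch of the most common use of no_op: an operation that does nothing by itself but, combined with a control dependency, forces other ops to run, or simply serves as a placeholder train_op/update_op as many of the snippets below do. The sketch uses the public TF 1.x graph-mode aliases (tf.no_op, tf.control_dependencies), which wrap the same control_flow_ops functions shown in the examples; it is an added illustration, not code taken from any of the projects cited below.

import tensorflow as tf  # TF 1.x graph-mode API assumed

counter = tf.Variable(0.0, name="counter")
update = tf.assign_add(counter, 1.0)

# no_op has no inputs, no outputs and no side effects; here it acts purely as a
# named barrier that forces `update` to run through the control dependency.
with tf.control_dependencies([update]):
  train_op = tf.no_op(name="train")

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  sess.run(train_op)        # triggers `update` via the control dependency
  print(sess.run(counter))  # 1.0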

Example 1: testQueueRunnerSerializationRoundTrip

  def testQueueRunnerSerializationRoundTrip(self):
    graph = ops.Graph()
    with graph.as_default():
      queue = data_flow_ops.FIFOQueue(10, dtypes.float32, name="queue")
      enqueue_op = control_flow_ops.no_op(name="enqueue")
      close_op = control_flow_ops.no_op(name="close")
      cancel_op = control_flow_ops.no_op(name="cancel")
      qr0 = queue_runner_impl.QueueRunner(
          queue, [enqueue_op],
          close_op,
          cancel_op,
          queue_closed_exception_types=(errors_impl.OutOfRangeError,
                                        errors_impl.CancelledError))
      qr0_proto = queue_runner_impl.QueueRunner.to_proto(qr0)
      qr0_recon = queue_runner_impl.QueueRunner.from_proto(qr0_proto)
      self.assertEqual("queue", qr0_recon.queue.name)
      self.assertEqual(1, len(qr0_recon.enqueue_ops))
      self.assertEqual(enqueue_op, qr0_recon.enqueue_ops[0])
      self.assertEqual(close_op, qr0_recon.close_op)
      self.assertEqual(cancel_op, qr0_recon.cancel_op)
      self.assertEqual(
          (errors_impl.OutOfRangeError, errors_impl.CancelledError),
          qr0_recon.queue_closed_exception_types)

      # Assert we reconstruct an OutOfRangeError for QueueRunners
      # created before QueueRunnerDef had a queue_closed_exception_types field.
      del qr0_proto.queue_closed_exception_types[:]
      qr0_legacy_recon = queue_runner_impl.QueueRunner.from_proto(qr0_proto)
      self.assertEqual("queue", qr0_legacy_recon.queue.name)
      self.assertEqual(1, len(qr0_legacy_recon.enqueue_ops))
      self.assertEqual(enqueue_op, qr0_legacy_recon.enqueue_ops[0])
      self.assertEqual(close_op, qr0_legacy_recon.close_op)
      self.assertEqual(cancel_op, qr0_legacy_recon.cancel_op)
      self.assertEqual((errors_impl.OutOfRangeError,),
                       qr0_legacy_recon.queue_closed_exception_types)
Author: ThunderQi, Project: tensorflow, Lines: 35, Source: queue_runner_test.py


Example 2: testAllArgumentsSet

 def testAllArgumentsSet(self):
   """Tests that no errors are raised when all arguments are set."""
   with ops.Graph().as_default(), self.cached_session():
     loss = constant_op.constant(1.)
     predictions = {'loss': loss}
     classes = constant_op.constant('hello')
     metric_obj = metrics.Mean()
     metric_obj.update_state(loss)
     model_fn.EstimatorSpec(
         mode=model_fn.ModeKeys.TRAIN,
         predictions=predictions,
         loss=loss,
         train_op=control_flow_ops.no_op(),
         eval_metric_ops={
             'loss': (control_flow_ops.no_op(), loss),
             'mean': metric_obj,
         },
         export_outputs={
             'head_name': export_output.ClassificationOutput(classes=classes)
         },
         training_chief_hooks=[_FakeHook()],
         training_hooks=[_FakeHook()],
         scaffold=monitored_session.Scaffold(),
         evaluation_hooks=[_FakeHook()],
         prediction_hooks=[_FakeHook()])
Author: AnishShah, Project: tensorflow, Lines: 25, Source: model_fn_test.py


Example 3: per_example_quantile_regression_loss

def per_example_quantile_regression_loss(labels, weights, predictions,
                                         quantile):
  """Smoothed loss for quantile regression.

  The standard quantile regression loss is quantile*(y-y') when y>y' and
  (quantile-1)*(y-y') otherwise, y' is a prediction, y is a label. The impl
  below is this loss but squared in the region where the loss value < 1.

  Args:
    labels: Rank 2 (N, D) tensor of per-example labels.
    weights: Rank 2 (N, 1) tensor of per-example weights.
    predictions: Rank 2 (N, D) tensor of per-example predictions.
    quantile: The quantile to use.

  Returns:
    loss: A Rank 2 (N, 1) tensor of per-example quantile loss.
    update_op: An update operation to update the loss's internal state.
  """
  labels = math_ops.to_float(labels)
  error = labels - predictions
  square_loss_right = array_ops.where(error * quantile < 1.0,
                                      math_ops.square(quantile * error),
                                      quantile * error)
  square_loss_left = array_ops.where(error * (quantile - 1) < 1,
                                     math_ops.square((quantile - 1) * error),
                                     (quantile - 1) * error)

  unweighted_loss = array_ops.where(error > 0, square_loss_right,
                                    square_loss_left)
  if weights is None:
    return unweighted_loss, control_flow_ops.no_op()
  else:
    return unweighted_loss * weights, control_flow_ops.no_op()
Author: Ajaycs99, Project: tensorflow, Lines: 33, Source: losses.py
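
The docstring above describes the smoothed quantile loss: the standard loss is quantile*(y-y') for y>y' and (quantile-1)*(y-y') otherwise, squared wherever the scaled error is below 1. As a quick sanity check, the same piecewise rule can be reproduced outside TensorFlow; the NumPy sketch below is an added illustration of that rule, not part of the original losses.py.

import numpy as np

def quantile_loss(error, quantile):
  """error = label - prediction; mirrors the array_ops.where logic above."""
  right = np.where(error * quantile < 1.0,
                   np.square(quantile * error),
                   quantile * error)
  left = np.where(error * (quantile - 1.0) < 1.0,
                  np.square((quantile - 1.0) * error),
                  (quantile - 1.0) * error)
  return np.where(error > 0.0, right, left)

# Squared where the quantile-scaled error is below 1, linear otherwise.
print(quantile_loss(np.array([0.5, 5.0, -0.5, -5.0]), quantile=0.9))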


Example 4: _model_fn

 def _model_fn(features, labels, mode):
   _ = labels
   x = features['x']
   y = features['y']
   with ops.name_scope('outputs'):
     predictions = {'sum': math_ops.add(x, y, name='sum'),
                    'product': math_ops.multiply(x, y, name='product'),
                    'difference': math_ops.subtract(x, y, name='difference')}
   if core:
     export_outputs = {k: export_output.PredictOutput({k: v})
                       for k, v in predictions.items()}
     export_outputs[signature_constants.
                    DEFAULT_SERVING_SIGNATURE_DEF_KEY] = export_outputs['sum']
     return model_fn.EstimatorSpec(mode=mode,
                                   predictions=predictions,
                                   export_outputs=export_outputs,
                                   loss=constant_op.constant(0),
                                   train_op=control_flow_ops.no_op())
   else:
     output_alternatives = {k: (constants.ProblemType.UNSPECIFIED, {k: v})
                            for k, v in predictions.items()}
     return contrib_model_fn.ModelFnOps(
         mode=mode,
         predictions=predictions,
         output_alternatives=output_alternatives,
         loss=constant_op.constant(0),
         train_op=control_flow_ops.no_op())
Author: 1000sprites, Project: tensorflow, Lines: 27, Source: testing_common.py


Example 5: per_example_maxent_loss

def per_example_maxent_loss(labels, weights, logits, num_classes, eps=1e-15):
  """Maximum entropy loss for multiclass problems.

  Maximum entropy is a generalization of logistic loss for the case when more
  than 2 classes are present.

  Args:
    labels: Rank 2 (N, 1) or Rank 1 (N) tensor of per-example labels.
    weights: Rank 2 (N, 1) tensor of per-example weights.
    logits: Rank 2 (N, K) tensor of per-example predictions, K - num of
    classes.
    num_classes: number of classes in classification task. Used to expand label
    indices into one-hot encodings.
    eps: tolerance, used as a minimum possible value.

  Returns:
    loss: A Rank 2 (N, 1) tensor of per-example maxent loss
    update_op: An update operation to update the loss's internal state.
  """
  labels = math_ops.to_int64(labels)
  # If labels are of rank 1, make them rank 2.
  labels_shape = labels.get_shape()
  if len(labels_shape) != 2:
    labels = array_ops.expand_dims(labels, 1)
  # Labels are indices of classes, convert them to one hot encodings.
  target_one_hot = array_ops.one_hot(indices=labels, depth=num_classes)
  labels = math_ops.reduce_sum(
      input_tensor=target_one_hot, reduction_indices=[1])
  labels = math_ops.to_float(labels)

  # Calculate softmax probabilities for each class.
  unnormalized_probs = math_ops.exp(logits)
  normalizers = math_ops.reduce_sum(unnormalized_probs, 1, keepdims=True)
  softmax_predictions = math_ops.divide(unnormalized_probs,
                                        math_ops.add(normalizers, eps))

  # Pull out the probabilities for real label.
  probs_for_real_class = math_ops.reduce_sum(labels * softmax_predictions, 1)

  # Add handling for values near 0 and 1.
  zeros = array_ops.zeros_like(probs_for_real_class, dtype=logits.dtype) + eps
  one_minus_eps = array_ops.ones_like(
      probs_for_real_class, dtype=logits.dtype) - eps

  # Take maximum(eps, pred)
  cond = (probs_for_real_class >= eps)
  probs_for_real_class = array_ops.where(cond, probs_for_real_class, zeros)

  # Take minimum(1-eps, pred)
  cond = (probs_for_real_class <= 1 - eps)
  probs_for_real_class = array_ops.where(cond, probs_for_real_class,
                                         one_minus_eps)

  unweighted_loss = array_ops.expand_dims(-math_ops.log(probs_for_real_class),
                                          1)
  if weights is None:
    return unweighted_loss, control_flow_ops.no_op()
  else:
    return unweighted_loss * weights, control_flow_ops.no_op()
Author: AndrewTwinz, Project: tensorflow, Lines: 59, Source: losses.py
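
The maxent loss above amounts to the negative log of the softmax probability assigned to the true class, with that probability clipped to [eps, 1 - eps]. The short NumPy sketch below reproduces the computation for integer class labels under that reading; it is an added illustration, not code from the original losses.py.

import numpy as np

def maxent_loss(labels, logits, eps=1e-15):
  """labels: (N,) int class indices; logits: (N, K). Mirrors the snippet above."""
  unnormalized = np.exp(logits)
  probs = unnormalized / (unnormalized.sum(axis=1, keepdims=True) + eps)
  p_true = probs[np.arange(len(labels)), labels]
  p_true = np.clip(p_true, eps, 1.0 - eps)   # maximum(eps, p), then minimum(1 - eps, p)
  return -np.log(p_true)[:, None]            # Rank 2 (N, 1), like the TF version

print(maxent_loss(np.array([0, 2]), np.array([[2.0, 1.0, 0.1],
                                              [0.5, 0.5, 3.0]])))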


Example 6: metrics_fn

 def metrics_fn(predictions, features):
   # checking that the inputs are properly passed.
   predict = predictions["mean"]
   target = features[feature_keys.TrainEvalFeatures.VALUES][:, -1, 0]
   return {
       "plain_boring_metric386":
           (math_ops.reduce_mean(math_ops.abs(predict - target)),
            control_flow_ops.no_op()),
       "fun_metric101": (math_ops.reduce_sum(predict + target),
                         control_flow_ops.no_op()),
   }
Author: abhinav-upadhyay, Project: tensorflow, Lines: 11, Source: head_test.py


Example 7: _cached_copy

  def _cached_copy(self, var, name, pass_through=False):
    """Helper function to create a worker cached copy of a Variable.

    This assigns the var (either a single Variable or a list of Variables) to
    local transient cache Variable(s). Note that if var is a list of Variables,
    the assignment is done sequentially to minimize the memory overheads.
    Also note that if pass_through is set to True, this does not create new
    Variables but simply return the input back.

    Args:
      var: A Variable or a list of Variables to cache.
      name: name of cached Variable.
      pass_through: when set to True, this simply pass through the var back
        through identity operator and does not actually creates a cache.

    Returns:
      Tuple consisting of following three entries:
      cache: the new transient Variable or list of transient Variables
        corresponding one-to-one with var.
      cache_init: op to initialize the Variable or the list of Variables.
      cache_reset: op to reset the Variable or the list of Variables to some
        default value.
    """
    if var is None:
      return None, None, None
    elif pass_through:
      cache = var
      cache_init = control_flow_ops.no_op()
      cache_reset = control_flow_ops.no_op()
    elif isinstance(var, variables.Variable):
      cache = WALSModel._transient_var(name=name)
      with ops.colocate_with(cache):
        cache_init = state_ops.assign(cache, var, validate_shape=False)
        cache_reset = state_ops.assign(cache, 1.0, validate_shape=False)
    else:
      assert isinstance(var, list)
      assert var
      cache = [
          WALSModel._transient_var(name="%s_shard_%d" % (name, i))
          for i in xrange(len(var))
      ]
      reset_ops = []
      for i, c in enumerate(cache):
        with ops.colocate_with(c):
          if i == 0:
            cache_init = state_ops.assign(c, var[i], validate_shape=False)
          else:
            with ops.control_dependencies([cache_init]):
              cache_init = state_ops.assign(c, var[i], validate_shape=False)
          reset_ops.append(state_ops.assign(c, 1.0, validate_shape=False))
      cache_reset = control_flow_ops.group(*reset_ops)

    return cache, cache_init, cache_reset
Author: Joetz, Project: tensorflow, Lines: 53, Source: factorization_ops.py


Example 8: _model_fn

 def _model_fn(features, labels, mode):
   del features  # unused
   del labels
   return model_fn_lib.EstimatorSpec(
       mode,
       train_op=control_flow_ops.no_op(),
       loss=constant_op.constant(1.),
       eval_metric_ops={
           'nested_metric': (
               ((constant_op.constant(2.), constant_op.constant(1)),
                constant_op.constant(3., dtype=dtypes.float64)),
               control_flow_ops.no_op())})
Author: LugarkPirog, Project: tensorflow, Lines: 12, Source: estimator_test.py


Example 9: _define_maximization_operation

  def _define_maximization_operation(self, num_batches):
    """Maximization operations."""
    # TODO(xavigonzalvo): some of these operations could be moved to C++.
    # Compute the effective number of data points assigned to component k.
    with ops.control_dependencies(self._w):
      points_in_k = array_ops.squeeze(
          math_ops.add_n(self._points_in_k), axis=[0])
      # Update alpha.
      if 'w' in self._params:
        final_points_in_k = points_in_k / num_batches
        num_examples = math_ops.cast(math_ops.reduce_sum(final_points_in_k),
                                     dtypes.float32)
        self._alpha_op = self._alpha.assign(final_points_in_k /
                                            (num_examples + MEPS))
      else:
        self._alpha_op = control_flow_ops.no_op()
      self._train_ops = [self._alpha_op]

      # Update means.
      points_in_k_expanded = array_ops.reshape(points_in_k,
                                               [self._num_classes, 1, 1])
      if 'm' in self._params:
        self._means_op = self._means.assign(
            math_ops.div(
                math_ops.add_n(self._w_mul_x), points_in_k_expanded + MEPS))
      else:
        self._means_op = control_flow_ops.no_op()
      # means are (num_classes x 1 x dims)

      # Update covariances.
      with ops.control_dependencies([self._means_op]):
        b = math_ops.add_n(self._w_mul_x2) / (points_in_k_expanded + MEPS)
        new_covs = []
        for k in range(self._num_classes):
          mean = self._means.value()[k, :, :]
          square_mean = math_ops.matmul(mean, mean, transpose_a=True)
          new_cov = b[k, :, :] - square_mean + self._min_var
          if self._covariance_type == FULL_COVARIANCE:
            new_covs.append(array_ops.expand_dims(new_cov, 0))
          elif self._covariance_type == DIAG_COVARIANCE:
            new_covs.append(
                array_ops.expand_dims(array_ops.diag_part(new_cov), 0))
        new_covs = array_ops.concat(new_covs, 0)
        if 'c' in self._params:
          # Train operations don't need to take care of the means
          # because covariances already depend on it.
          with ops.control_dependencies([self._means_op, new_covs]):
            self._train_ops.append(
                state_ops.assign(
                    self._covs, new_covs, validate_shape=False))
Author: Albert-Z-Guo, Project: tensorflow, Lines: 50, Source: gmm_ops.py


Example 10: testLossNotScalar

 def testLossNotScalar(self):
   with ops.Graph().as_default(), self.test_session():
     with self.assertRaisesRegexp(ValueError, 'Loss must be scalar'):
       model_fn.EstimatorSpec(
           mode=model_fn.ModeKeys.TRAIN,
           loss=constant_op.constant([1., 2.]),
           train_op=control_flow_ops.no_op())
Author: AlbertXiebnu, Project: tensorflow, Lines: 7, Source: model_fn_test.py


Example 11: testLoss1DTensor

 def testLoss1DTensor(self):
   """Tests that no errors are raised when loss is 1D tensor."""
   with ops.Graph().as_default(), self.test_session():
     model_fn.EstimatorSpec(
         mode=model_fn.ModeKeys.TRAIN,
         loss=constant_op.constant([1.]),
         train_op=control_flow_ops.no_op())
Author: AlbertXiebnu, Project: tensorflow, Lines: 7, Source: model_fn_test.py


Example 12: create_model_fn_ops

  def create_model_fn_ops(self, predictions, output_alternatives,
                          mode=model_fn.ModeKeys.INFER):

    return model_fn.ModelFnOps(
        model_fn.ModeKeys.INFER,
        predictions=predictions,
        loss=constant_op.constant([1]),
        train_op=control_flow_ops.no_op(),
        eval_metric_ops={
            "metric_key": (constant_op.constant(1.), control_flow_ops.no_op()),
            "loss": (constant_op.constant(1.), control_flow_ops.no_op()),
        },
        training_chief_hooks=[basic_session_run_hooks.StepCounterHook()],
        training_hooks=[basic_session_run_hooks.StepCounterHook()],
        output_alternatives=output_alternatives,
        scaffold=monitored_session.Scaffold())
Author: 1000sprites, Project: tensorflow, Lines: 16, Source: model_fn_test.py


Example 13: testRequiredArgumentsSet

 def testRequiredArgumentsSet(self):
   """Tests that no errors are raised when all required arguments are set."""
   with ops.Graph().as_default(), self.test_session():
     model_fn.EstimatorSpec(
         mode=model_fn.ModeKeys.TRAIN,
         loss=constant_op.constant(1.),
         train_op=control_flow_ops.no_op())
Author: AlbertXiebnu, Project: tensorflow, Lines: 7, Source: model_fn_test.py


Example 14: test_metric_op_is_tensor

  def test_metric_op_is_tensor(self):
    """Tests that ops.Operation is wrapped by a tensor for metric_ops."""
    with context.graph_mode():
      loss = {'my_loss': constant_op.constant([0])}
      predictions = {u'output1': constant_op.constant(['foo'])}
      metric_obj = metrics_module.Mean()
      metric_obj.update_state(constant_op.constant([0]))
      metrics = {
          'metrics_1': metric_obj,
          'metrics_2': (constant_op.constant([0]), control_flow_ops.no_op())
      }

      outputter = MockSupervisedOutput(loss, predictions, metrics)

      self.assertTrue(outputter.metrics['metrics_1/update_op'].name.startswith(
          'metric_op_wrapper'))
      self.assertTrue(
          isinstance(outputter.metrics['metrics_1/update_op'], ops.Tensor))
      self.assertTrue(
          isinstance(outputter.metrics['metrics_1/value'], ops.Tensor))

      self.assertEqual(outputter.metrics['metrics_2/value'],
                       metrics['metrics_2'][0])
      self.assertTrue(outputter.metrics['metrics_2/update_op'].name.startswith(
          'metric_op_wrapper'))
      self.assertTrue(
          isinstance(outputter.metrics['metrics_2/update_op'], ops.Tensor))
Author: Wajih-O, Project: tensorflow, Lines: 27, Source: export_output_test.py


Example 15: testWatchingOutputSlotWithoutOutgoingEdge

  def testWatchingOutputSlotWithoutOutgoingEdge(self):
    """Test watching output slots not attached to any outgoing edges."""

    with session.Session() as sess:
      u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
      u = constant_op.constant(u_init_val, shape=[2, 2], name="u")

      # Create a control edge from a node with an output: From u to z.
      # Node u will get executed only because of the control edge. The output
      # tensor u:0 is not attached to any outgoing edge in the graph. This test
      # checks that the debugger can watch such a tensor.
      with ops.control_dependencies([u]):
        z = control_flow_ops.no_op(name="z")

      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          debug_ops=["DebugIdentity"],
          debug_urls=self._debug_urls())

      run_metadata = config_pb2.RunMetadata()
      sess.run(z, options=run_options, run_metadata=run_metadata)

      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)

      # Assert that the DebugIdentity watch on u works properly.
      self.assertEqual(1, len(dump.dumped_tensor_data))
      datum = dump.dumped_tensor_data[0]
      self.assertEqual("u", datum.node_name)
      self.assertEqual(0, datum.output_slot)
      self.assertEqual("DebugIdentity", datum.debug_op)
      self.assertAllClose([[5.0, 3.0], [-1.0, 0.0]], datum.get_tensor())
Author: BloodD, Project: tensorflow, Lines: 34, Source: session_debug_testlib.py


Example 16: model_fn

 def model_fn(features, labels):
   # dummy variable:
   _ = variables.Variable([0.])
   _ = labels
   predictions = features["x"]
   loss = constant_op.constant([2.])
   return predictions, loss, control_flow_ops.no_op()
Author: AlbertXiebnu, Project: tensorflow, Lines: 7, Source: estimators_test.py


Example 17: model_fn

  def model_fn(self, mode, features, labels, params):
    c = variable_scope.get_variable(
        'c',
        initializer=constant_op.constant(10, dtype=dtypes.float64),
        dtype=dtypes.float64)

    predictions = math_ops.multiply(features, c)

    loss = None
    if mode is not model_fn_lib.ModeKeys.PREDICT:
      loss = losses.absolute_difference(
          labels=labels,
          predictions=predictions,
          reduction=losses.Reduction.SUM)
      loss = math_ops.reduce_sum(loss)

    metrics = {
        'accuracy': metrics_lib.accuracy(labels, predictions),
        'auc': metrics_lib.auc(labels, predictions)
    }

    return model_fn_lib.EstimatorSpec(
        mode=mode,
        loss=loss,
        eval_metric_ops=metrics,
        predictions={'probabilities': predictions},
        train_op=control_flow_ops.no_op())  # This train_op isn't actually used.
Author: AbhinavJain13, Project: tensorflow, Lines: 27, Source: replicate_model_fn_test.py


Example 18: _TestInsertQuantOpForAddAfterConv2d

  def _TestInsertQuantOpForAddAfterConv2d(self, is_training):
    graph = ops.Graph()
    with graph.as_default():
      batch_size, height, width, depth = 5, 128, 128, 3
      input1 = array_ops.zeros((batch_size, height, width, depth))
      input2 = array_ops.zeros((batch_size, height / 2, width / 2, 32))
      conv = conv2d(input1, 32, [5, 5], stride=2, padding='SAME',
                    weights_initializer=self._WeightInit(0.09),
                    activation_fn=None, scope='test/test')
      node = math_ops.add(conv, input2, name='test/add')
      node = nn_ops.relu6(node, name='test/relu6')
      update_barrier = control_flow_ops.no_op(name='update_barrier')
      with ops.control_dependencies([update_barrier]):
        array_ops.identity(node, name='control_dependency')

    quantize.Quantize(graph, is_training, weight_bits=8, activation_bits=8)

    quantization_node_name = 'FakeQuantWithMinMaxVars'
    conv_quant = graph.get_operation_by_name('test/test/conv_quant/' +
                                             quantization_node_name)
    self.assertEqual(conv_quant.type, quantization_node_name)

    # Scan through all FakeQuant operations, ensuring that the activation
    # isn't in the consumers of the operation. Since activations are folded
    # the preceding operation during inference, the FakeQuant operation after
    # the activation is all that is needed.
    for op in graph.get_operations():
      if op.type == quantization_node_name:
        quant_op = graph.get_operation_by_name(op.name)
        consumers = []
        for output in quant_op.outputs:
          consumers.extend(output.consumers())

        self.assertNotIn('test/relu6', [c.name for c in consumers])
Author: AnishShah, Project: tensorflow, Lines: 34, Source: quantize_test.py


Example 19: assert_type

def assert_type(tensor, tf_type, message=None, name=None):
  """Statically asserts that the given `Tensor` is of the specified type.

  Args:
    tensor: A tensorflow `Tensor`.
    tf_type: A tensorflow type (`dtypes.float32`, `tf.int64`, `dtypes.bool`,
      etc).
    message: A string to prefix to the default message.
    name:  A name to give this `Op`.  Defaults to "assert_type"

  Raises:
    TypeError: If the tensors data type doesn't match `tf_type`.

  Returns:
    A `no_op` that does nothing.  Type can be determined statically.
  """
  message = message or ''
  with ops.name_scope(name, 'assert_type', [tensor]):
    tensor = ops.convert_to_tensor(tensor, name='tensor')
    if tensor.dtype != tf_type:
      if context.executing_eagerly():
        raise TypeError('%s tensor must be of type %s' % (message, tf_type))
      else:
        raise TypeError('%s  %s must be of type %s' % (message, tensor.name,
                                                       tf_type))

    return control_flow_ops.no_op('statically_determined_correct_type')
Author: Jackiefan, Project: tensorflow, Lines: 27, Source: check_ops.py
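
assert_type returns a no_op because the check is resolved statically: if the dtype matches there is nothing left to verify at run time, but callers still get an op to hang control dependencies on. A minimal usage sketch, assuming the public TF 1.x alias tf.assert_type for this function, might look like this:

import tensorflow as tf  # TF 1.x API assumed; tf.assert_type is taken to wrap check_ops.assert_type

x = tf.constant([1.0, 2.0], name="x")
check = tf.assert_type(x, tf.float32)  # dtype already known statically, so this is a no_op

# The returned no_op can still anchor control dependencies, keeping graph code uniform.
with tf.control_dependencies([check]):
  y = tf.identity(x, name="x_checked")

with tf.Session() as sess:
  print(sess.run(y))  # [1. 2.]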


Example 20: summary_writer_function

def summary_writer_function(name, tensor, function, family=None):
  """Helper function to write summaries.

  Args:
    name: name of the summary
    tensor: main tensor to form the summary
    function: function taking a tag and a scope which writes the summary
    family: optional, the summary's family

  Returns:
    The result of writing the summary.
  """
  def record():
    with summary_op_util.summary_scope(
        name, family, values=[tensor]) as (tag, scope):
      with ops.control_dependencies([function(tag, scope)]):
        return constant_op.constant(True)

  if context.context().summary_writer_resource is None:
    return control_flow_ops.no_op()
  with ops.device("cpu:0"):
    op = utils.smart_cond(
        should_record_summaries(), record, _nothing, name="")
    ops.add_to_collection(ops.GraphKeys._SUMMARY_COLLECTION, op)  # pylint: disable=protected-access
  return op
Author: ChengYuXiang, Project: tensorflow, Lines: 25, Source: summary_ops.py



Note: The tensorflow.python.ops.control_flow_ops.no_op examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors; copyright remains with the original authors, and any use or redistribution should follow the corresponding project's license. Please do not republish without permission.

