
Python math_ops.add_n Function Code Examples


This article collects typical usage examples of the Python function tensorflow.python.ops.math_ops.add_n. If you are wondering what add_n does, how it is used, or where to find real-world examples of it, the curated code examples below may help.



Twenty code examples of the add_n function are shown below, ordered by popularity.
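As a quick orientation first: add_n takes a list of tensors that all share the same shape and dtype and sums them element-wise in a single op, which is generally cheaper than chaining binary adds. Below is a minimal sketch using the public alias tf.math.add_n, which is backed by math_ops.add_n; the values are illustrative only.

import tensorflow as tf

# Three same-shaped tensors to sum element-wise.
a = tf.constant([1.0, 2.0])
b = tf.constant([3.0, 4.0])
c = tf.constant([5.0, 6.0])

# add_n sums the whole list in one op.
total = tf.math.add_n([a, b, c])
print(total)  # tf.Tensor([ 9. 12.], shape=(2,), dtype=float32)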

Example 1: MYsequence_loss_by_example

def MYsequence_loss_by_example(logits, targets, weights,
                               average_across_timesteps=True,
                               softmax_loss_function=None, name=None):
  if len(targets) != len(logits) or len(weights) != len(logits):
    raise ValueError("Lengths of logits, weights, and targets must be the same "
                     "%d, %d, %d." % (len(logits), len(weights), len(targets)))
  with ops.op_scope(logits + targets + weights, name,
                    "sequence_loss_by_example"):
    log_perp_list = []
    for logit, target, weight in zip(logits, targets, weights):
      if softmax_loss_function is None:
        # TODO(irving,ebrevdo): This reshape is needed because
        # sequence_loss_by_example is called with scalars sometimes, which
        # violates our general scalar strictness policy.
        target = array_ops.reshape(target, [-1])
        crossent = nn_ops.sparse_softmax_cross_entropy_with_logits(
            logit, target)
      else:
        crossent = softmax_loss_function(logit, target)
      print(crossent, weight)  # debug print from the original snippet (Python 3 syntax)
      log_perp_list.append(crossent * weight)
      print(log_perp_list)  # debug print from the original snippet (Python 3 syntax)
    log_perps = math_ops.add_n(log_perp_list)
    if average_across_timesteps:
      total_size = math_ops.add_n(weights)
      total_size += 1e-12  # Just to avoid division by 0 for all-0 weights.
      log_perps /= total_size
  return log_perps
Developer: awjuliani | Project: NeuralDreamVideos | Lines: 28 | Source: model_rnn.py
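Written out, what example 1 computes per example is the weighted sum of per-timestep cross-entropies, optionally normalized by the total weight:

\[ \text{log\_perps} = \frac{\sum_t w_t \cdot \mathrm{xent}(\text{logit}_t, \text{target}_t)}{\sum_t w_t + 10^{-12}} \]

The 10^{-12} term only guards against division by zero when all weights are zero. Examples 8 and 14 below follow the same pattern.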


Example 2: MMIloss

def MMIloss(logits, targets, weights, lam, gam,
            average_across_timesteps=True,
            softmax_loss_function=None, name=None):
  """lam is the lambda value (diversity penalty) of the objective; gam is the
  gamma value (length penalty) of the objective (see section 4.5.1 of Li et al.)."""
  if len(targets) != len(logits) or len(weights) != len(logits):
    raise ValueError("Lengths of logits, weights, and targets must be the same "
                     "%d, %d, %d." % (len(logits), len(weights), len(targets)))

  with ops.op_scope(logits + targets + weights, name,
                    "sequence_loss_by_example"):
    log_perp_list = []
    for logit, target, weight in zip(logits, targets, weights):
      if softmax_loss_function is None:
        target = array_ops.reshape(target, [-1])
        crossent = nn_ops.sparse_softmax_cross_entropy_with_logits(
            logit, target)
      else:
        crossent = softmax_loss_function(logit, target)
      log_perp_list.append(crossent * weight)
    log_perps = math_ops.add_n(log_perp_list)
    if average_across_timesteps:
      total_size = math_ops.add_n(weights)
      total_size += 1e-12  # Just to avoid division by 0 for all-0 weights.
      log_perps /= total_size

    # NOTE: lm_perps (the language-model perplexity term) is not defined
    # anywhere in the original snippet; the caller must compute and supply it.
    final_perps = log_perps - lam * lm_perps + gam * len(targets)
    return final_perps
Developer: myungin | Project: NLP_baseline | Lines: 33 | Source: MMIloss.py
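For context, the decoding objective from section 4.5.1 of Li et al., which the docstring cites, scores a target T given a source S roughly as (assuming the standard MMI-antiLM form):

\[ \hat{T} = \arg\max_T \big\{ \log p(T \mid S) - \lambda \log p(T) + \gamma\,|T| \big\} \]

where the lambda term penalizes generic responses that a plain language model already scores highly and the gamma term rewards length. final_perps above is the corresponding per-example loss, with lm_perps standing in for the language-model term.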


Example 3: testAddN

  def testAddN(self):
    devices = ["/cpu:0"]
    if test_util.is_gpu_available():
      devices.append("/gpu:0")
    for device in devices:
      with ops.device(device):
        # With value
        opt1 = optional_ops.Optional.from_value((1.0, 2.0))
        opt2 = optional_ops.Optional.from_value((3.0, 4.0))

        add_tensor = math_ops.add_n([opt1._variant_tensor,
                                     opt2._variant_tensor])
        add_opt = optional_ops._OptionalImpl(add_tensor, opt1.value_structure)
        self.assertAllEqual(self.evaluate(add_opt.get_value()), (4.0, 6.0))

        # Without value
        opt_none1 = optional_ops.Optional.none_from_structure(
            opt1.value_structure)
        opt_none2 = optional_ops.Optional.none_from_structure(
            opt2.value_structure)
        add_tensor = math_ops.add_n([opt_none1._variant_tensor,
                                     opt_none2._variant_tensor])
        add_opt = optional_ops._OptionalImpl(add_tensor,
                                             opt_none1.value_structure)
        self.assertFalse(self.evaluate(add_opt.has_value()))
Developer: kylin9872 | Project: tensorflow | Lines: 25 | Source: optional_test.py


Example 4: testSimpleSwap

  def testSimpleSwap(self):
    """Check that the swap annotations are followed."""
    a = variables.Variable(10, name='a')
    b = variables.Variable(20, name='b')
    c = math_ops.add_n([a, b], name='c')
    d = math_ops.add_n([b, c], name='d')
    train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
    train_op.append(d)

    d.op.node_def.attr['_swap_to_host'].i = 0

    mg = meta_graph.create_meta_graph_def(graph=ops.get_default_graph())
    graph_size = len(mg.graph_def.node)

    rewriter_config = rewriter_config_pb2.RewriterConfig(
        disable_model_pruning=True,
        memory_optimization=rewriter_config_pb2.RewriterConfig.MANUAL)
    graph = tf_optimizer.OptimizeGraph(rewriter_config, mg)

    self.assertEqual(len(graph.node), graph_size + 2)
    self.assertTrue(
        set([node.name for node in graph.node]) > set(
            ['a', 'b', 'c', 'd', 'swap_in_d_0', 'swap_out_d_0']))
    for node in graph.node:
      if node.name == 'swap_in_d_0':
        self.assertEqual('swap_out_d_0', node.input[0])
        self.assertEqual('^b/read', node.input[1])
      elif node.name == 'swap_out_d_0':
        self.assertEqual('b/read', node.input[0])
      elif node.name == 'd':
        self.assertEqual('swap_in_d_0', node.input[0])
        self.assertEqual('c', node.input[1])
Developer: Dr4KK | Project: tensorflow | Lines: 32 | Source: memory_optimizer_test.py


Example 5: testSimpleSwap

  def testSimpleSwap(self):
    """Check that the swap annotations are followed."""
    a = constant_op.constant(10, name='a')
    b = constant_op.constant(20, name='b')
    c = math_ops.add_n([a, b], name='c')
    d = math_ops.add_n([b, c], name='d')
    train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
    train_op.append(d)

    d.op.node_def.attr['_swap_to_host'].i = 0

    mg = meta_graph.create_meta_graph_def(graph=ops.get_default_graph())

    rewriter_config = rewriter_config_pb2.RewriterConfig(
        memory_optimization=rewriter_config_pb2.RewriterConfig.MANUAL)
    graph = tf_optimizer.OptimizeGraph(rewriter_config, mg)

    self.assertEqual(len(graph.node), 6)
    self.assertItemsEqual([node.name for node in graph.node], [
        'a',
        'b',
        'c',
        'd',
        'swap_in_d_0',
        'swap_out_d_0',
    ])
    for node in graph.node:
      if node.name == 'swap_in_d_0':
        self.assertEqual('swap_out_d_0', node.input[0])
        self.assertEqual('^b', node.input[1])
      elif node.name == 'swap_out_d_0':
        self.assertEqual('b', node.input[0])
      elif node.name == 'd':
        self.assertEqual('swap_in_d_0', node.input[0])
        self.assertEqual('c', node.input[1])
Developer: AutumnQYN | Project: tensorflow | Lines: 35 | Source: memory_optimizer_test.py


Example 6: testBasicMemory

  def testBasicMemory(self):
    """Make sure arguments can be passed correctly."""
    with test_util.device(use_gpu=False):
      a = constant_op.constant(10, name="a")
      b = constant_op.constant(20, name="b")
      c = math_ops.add_n([a, b], name="c")
      d = math_ops.add_n([b, c], name="d")
      train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
      train_op.append(d)
      mg = meta_graph.create_meta_graph_def(graph=ops.get_default_graph())

    report = cost_analyzer.GenerateMemoryReport(mg)

    # Print the report to make it easier to debug
    print("{}".format(report))

    # Check the report
    self.assertTrue(
        "Peak usage for device /job:localhost/replica:0/task:0/device:CPU:0: "
        "16 bytes"
        in report)
    self.assertTrue("  a:0 uses 4 bytes" in report)
    self.assertTrue("  b:0 uses 4 bytes" in report)
    self.assertTrue("  c:0 uses 4 bytes" in report)
    self.assertTrue("  d:0 uses 4 bytes" in report)
Developer: adit-chandra | Project: tensorflow | Lines: 25 | Source: cost_analyzer_test.py


Example 7: surrogate_loss

def surrogate_loss(sample_losses,
                   stochastic_tensors=None,
                   name="SurrogateLoss"):
  """Surrogate loss for stochastic graphs.

  This function will call `loss_fn` on each `StochasticTensor`
  upstream of `sample_losses`, passing the losses that it influenced.

  Note that currently `surrogate_loss` does not work with `StochasticTensor`s
  instantiated in `while_loop`s or other control structures.

  Args:
    sample_losses: a list or tuple of final losses. Each loss should be per
      example in the batch (and possibly per sample); that is, it should have
      dimensionality of 1 or greater. All losses should have the same shape.
    stochastic_tensors: a list of `StochasticTensor`s to add loss terms for.
      If None, defaults to all `StochasticTensor`s in the graph upstream of
      the `Tensor`s in `sample_losses`.
    name: the name with which to prepend created ops.

  Returns:
    `Tensor` loss, which is the sum of `sample_losses` and the
    `loss_fn`s returned by the `StochasticTensor`s.

  Raises:
    TypeError: if `sample_losses` is not a list or tuple, or if its elements
      are not `Tensor`s.
    ValueError: if any loss in `sample_losses` does not have dimensionality 1
      or greater.
  """
  with ops.op_scope(sample_losses, name):
    fixed_losses = []
    if not isinstance(sample_losses, (list, tuple)):
      raise TypeError("sample_losses must be a list or tuple")
    for loss in sample_losses:
      if not isinstance(loss, ops.Tensor):
        raise TypeError("loss is not a Tensor: %s" % loss)
      ndims = loss.get_shape().ndims
      if not (ndims is not None and ndims >= 1):
        raise ValueError("loss must have dimensionality 1 or greater: %s" %
                         loss)
      fixed_losses.append(array_ops.stop_gradient(loss))

    stoch_dependencies_map = _stochastic_dependencies_map(
        fixed_losses, stochastic_tensors=stochastic_tensors)
    if not stoch_dependencies_map:
      logging.warn(
          "No collection of Stochastic Tensors found for current graph.")
      return math_ops.add_n(sample_losses)

    # Iterate through all of the stochastic dependencies, adding
    # surrogate terms where necessary.
    sample_losses = [ops.convert_to_tensor(loss) for loss in sample_losses]
    loss_terms = sample_losses
    for (stoch_node, dependent_losses) in stoch_dependencies_map.items():
      loss_term = stoch_node.loss(list(dependent_losses))
      if loss_term is not None:
        loss_terms.append(loss_term)

    return math_ops.add_n(loss_terms)
Developer: 10imaging | Project: tensorflow | Lines: 60 | Source: stochastic_graph.py
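A sketch of the surrogate this function builds, assuming the usual score-function (REINFORCE) form for the per-StochasticTensor loss_fn terms:

\[ \mathcal{L}_{\text{surr}} = \sum_i L_i + \sum_s \log p(z_s) \cdot \operatorname{stop\_grad}\Big( \sum_{i \in D(s)} L_i \Big) \]

where the L_i are the sample losses, the z_s are the stochastic tensors, and D(s) indexes the losses downstream of z_s. Differentiating this surrogate yields an unbiased estimate of the gradient of the expected loss, which is why the influenced losses are wrapped in stop_gradient above.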


Example 8: sequence_loss_by_example

def sequence_loss_by_example(logits, targets, weights, average_across_time=True, scope=None):
  '''
  A simple version of weighted sequence loss, measured per sequence.
  :param logits:
  :param targets:
  :param weights:
  :param average_across_time:
  :param scope:
  :return:
  '''
  if len(logits) != len(targets) or len(weights) != len(logits):
    raise ValueError("Lengths of logits, weights and targets must be the same "
                     "%d, %d, %d" % (len(logits), len(weights), len(targets)))

  with tf.variable_scope(scope or "sequence_loss_by_example"):
    sequence_loss_list = []
    for logit, target, weight in zip(logits, targets, weights):
      loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logit, target)
      # Weight the per-timestep cross-entropy before summing.
      sequence_loss_list.append(loss * weight)
    sequence_loss = math_ops.add_n(sequence_loss_list)
    if average_across_time:
      total_weight = math_ops.add_n(weights) + 1e-12
      final_loss = sequence_loss / total_weight
    else:
      final_loss = sequence_loss
    return final_loss
Developer: Kaushikpatnaik | Project: LSTMModelRepo | Lines: 28 | Source: model.py


Example 9: _MultiDeviceAddN

def _MultiDeviceAddN(tensor_list, gradient_uid):
  """Adds tensors from potentially multiple devices."""
  # Basic function structure comes from control_flow_ops.group().
  # Sort tensors according to their devices.
  tensors_on_device = collections.defaultdict(lambda: [])
  for tensor in tensor_list:
    tensors_on_device[tensor.device].append(tensor)

  # For each device, add the tensors on that device first.
  # Then gather the partial sums from multiple devices.
  # TODO(sjhwang): Create hierarchical aggregation tree as pbar's suggestion.
  # E.g., aggregate per GPU, then per task, and so on.
  summands = []

  def DeviceKey(dev):
    return "" if dev is None else dev

  for dev in sorted(six.iterkeys(tensors_on_device), key=DeviceKey):
    tensors = tensors_on_device[dev]
    with ops._colocate_with_for_gradient(  # pylint: disable=protected-access
        tensors[0].op,
        gradient_uid,
        ignore_existing=True):
      summands.append(math_ops.add_n(tensors))

  return math_ops.add_n(summands)
Developer: terrytangyuan | Project: tensorflow | Lines: 26 | Source: gradients_util.py


Example 10: _full_batch_training_op

  def _full_batch_training_op(self, inputs, cluster_idx_list, cluster_centers):
    """Creates an op for training for full batch case.

    Args:
      inputs: list of input Tensors.
      cluster_idx_list: A vector (or list of vectors). Each element in the
        vector corresponds to an input row in 'inp' and specifies the cluster id
        corresponding to the input.
      cluster_centers: Tensor Ref of cluster centers.

    Returns:
      An op for doing an update of mini-batch k-means.
    """
    cluster_sums = []
    cluster_counts = []
    epsilon = constant_op.constant(1e-6, dtype=inputs[0].dtype)
    for inp, cluster_idx in zip(inputs, cluster_idx_list):
      with ops.colocate_with(inp):
        cluster_sums.append(
            math_ops.unsorted_segment_sum(inp, cluster_idx, self._num_clusters))
        cluster_counts.append(
            math_ops.unsorted_segment_sum(
                array_ops.reshape(
                    array_ops.ones(
                        array_ops.reshape(array_ops.shape(inp)[0], [-1])),
                    [-1, 1]), cluster_idx, self._num_clusters))
    with ops.colocate_with(cluster_centers):
      new_clusters_centers = math_ops.add_n(cluster_sums) / (math_ops.cast(
          math_ops.add_n(cluster_counts), cluster_sums[0].dtype) + epsilon)
      if self._clusters_l2_normalized():
        new_clusters_centers = nn_impl.l2_normalize(new_clusters_centers, dim=1)
    return state_ops.assign(cluster_centers, new_clusters_centers)
Developer: AliMiraftab | Project: tensorflow | Lines: 32 | Source: clustering_ops.py
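The assignment in example 10 is the standard full-batch k-means centroid update, with unsorted_segment_sum producing the per-cluster sums and counts and epsilon guarding against empty clusters:

\[ \mu_k \leftarrow \frac{\sum_{i:\,c_i = k} x_i}{\big|\{i : c_i = k\}\big| + \epsilon} \]

followed by an optional L2 normalization when the cluster centers live on the unit sphere.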


Example 11: testFloat

 def testFloat(self):
   np.random.seed(12345)
   for num_inputs in range(1, 10):
     x = [np.random.random((1, 2, 3, 4, 5)) - 0.5 for _ in range(num_inputs)]
     tf_x = ops.convert_n_to_tensor(x)
     with self.test_session(use_gpu=True):
       self.assertAllClose(sum(x), math_ops.add_n(tf_x).eval())
       self.assertAllClose(x[0] * num_inputs,
                           math_ops.add_n([tf_x[0]] * num_inputs).eval())
Developer: LongJun123456 | Project: tensorflow | Lines: 9 | Source: math_ops_test.py


Example 12: testIndexedSlices

 def testIndexedSlices(self):
   slc = ops.IndexedSlices(
       array_ops.constant([1, 2], shape=[1, 2]), array_ops.constant([1]),
       array_ops.constant([2, 2]))
   slc_as_dense = np.array([[0, 0], [1, 2]])
   with self.test_session(use_gpu=True):
     # add_n currently always converts IndexedSlices to dense
     self.assertAllEqual(slc_as_dense, math_ops.add_n([slc]).eval())
     self.assertAllEqual(2 * slc_as_dense, math_ops.add_n([slc, slc]).eval())
Developer: Wajih-O | Project: tensorflow | Lines: 9 | Source: math_ops_test.py


Example 13: testPartials

  def testPartials(self):
    """Test that previously revealed a bug in buffer forwarding for AddN."""
    partials = []
    for _ in range(98):
      partials.append(math_ops.add_n([constant_op.constant(1)]))
    partials.append(
        math_ops.add_n([constant_op.constant(1), constant_op.constant(1)]))

    res = math_ops.add_n(partials) + constant_op.constant(0)
    with self.test_session(use_gpu=True):
      self.assertAllEqual(res.eval(), 100)
Developer: xylary | Project: tensorflow | Lines: 11 | Source: math_ops_test.py


Example 14: sequence_loss_by_example

def sequence_loss_by_example(logits, targets, weights, num_decoder_symbols,
                             average_across_timesteps=True,
                             softmax_loss_function=None, name=None):
  """Weighted cross-entropy loss for a sequence of logits (per example).

  Args:
    logits: list of 2D Tensors of shape [batch_size x num_decoder_symbols].
    targets: list of 1D batch-sized int32 Tensors of the same length as logits.
    weights: list of 1D batch-sized float-Tensors of the same length as logits.
    num_decoder_symbols: integer, number of decoder symbols (output classes).
    average_across_timesteps: If set, divide the returned cost by the total
      label weight.
    softmax_loss_function: function (inputs-batch, labels-batch) -> loss-batch
      to be used instead of the standard softmax (the default if this is None).
    name: optional name for this operation, default: "sequence_loss_by_example".

  Returns:
    1D batch-sized float Tensor: the log-perplexity for each sequence.

  Raises:
    ValueError: if len(logits) is different from len(targets) or len(weights).
  """
  if len(targets) != len(logits) or len(weights) != len(logits):
    raise ValueError("Lengths of logits, weights, and targets must be the same "
                     "%d, %d, %d." % (len(logits), len(weights), len(targets)))
  with ops.op_scope(logits + targets + weights, name,
                    "sequence_loss_by_example"):
    batch_size = array_ops.shape(targets[0])[0]
    log_perp_list = []
    length = batch_size * num_decoder_symbols
    for i in xrange(len(logits)):
      if softmax_loss_function is None:
        # TODO(lukaszkaiser): There is no SparseCrossEntropy in TensorFlow, so
        # we need to first cast targets into a dense representation, and as
        # SparseToDense does not accept batched inputs, we need to do this by
        # re-indexing and re-sizing. When TensorFlow adds SparseCrossEntropy,
        # rewrite this method.
        indices = targets[i] + num_decoder_symbols * math_ops.range(batch_size)
        with ops.device("/cpu:0"):  # Sparse-to-dense must be on CPU for now.
          dense = sparse_ops.sparse_to_dense(
              indices, array_ops.expand_dims(length, 0), 1.0,
              0.0)
        target = array_ops.reshape(dense, [-1, num_decoder_symbols])
        crossent = nn_ops.softmax_cross_entropy_with_logits(
            logits[i], target, name="SequenceLoss/CrossEntropy{0}".format(i))
      else:
        crossent = softmax_loss_function(logits[i], targets[i])
      log_perp_list.append(crossent * weights[i])
    log_perps = math_ops.add_n(log_perp_list)
    if average_across_timesteps:
      total_size = math_ops.add_n(weights)
      total_size += 1e-12  # Just to avoid division by 0 for all-0 weights.
      log_perps /= total_size
  return log_perps
Developer: hlt-mt | Project: tensorflow | Lines: 54 | Source: seq2seq.py
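The sparse-to-dense workaround in example 14 flattens the whole batch into a single vector so each target's one-hot position can be set at once. A small NumPy sketch of that index arithmetic (the values are illustrative):

import numpy as np

batch_size, num_decoder_symbols = 3, 5
targets_i = np.array([2, 0, 4])  # one target id per batch row

# Flat position of each 1.0 in a vector of length batch_size * num_decoder_symbols.
indices = targets_i + num_decoder_symbols * np.arange(batch_size)

dense = np.zeros(batch_size * num_decoder_symbols)
dense[indices] = 1.0
one_hot = dense.reshape(batch_size, num_decoder_symbols)
assert all(one_hot[i, t] == 1.0 for i, t in enumerate(targets_i))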


Example 15: _reduce

 def _reduce(self, aggregation, value, destinations):
   if not isinstance(value, values.MapOutput):
     return value
   l = value.get()
   assert l
   with ops.device(self._device):
     if aggregation == vs.VariableAggregation.SUM:
       return math_ops.add_n(l)
     elif aggregation == vs.VariableAggregation.MEAN:
       return math_ops.add_n(l) / len(l)
     else:
       assert False
Developer: ChristinaEricka | Project: tensorflow | Lines: 12 | Source: one_device_strategy.py


Example 16: _reduce

 def _reduce(self, method_string, value, destinations):
   if not isinstance(value, values.MapOutput):
     return value
   l = value.get()
   assert l
   with ops.device(self._device):
     if method_string == "sum":
       return math_ops.add_n(l)
     elif method_string == "mean":
       return math_ops.add_n(l) / len(l)
     else:
       assert False
Developer: Jackiefan | Project: tensorflow | Lines: 12 | Source: one_device_strategy.py


Example 17: testInt

 def testInt(self):
   np.random.seed(54321)
   for num_inputs in range(1, 10):
     x = [
         np.random.randint(-128, 128, (5, 4, 3, 2, 1))
         for _ in range(num_inputs)
     ]
     tf_x = ops.convert_n_to_tensor(x)
     with self.test_session(use_gpu=True):
       self.assertAllEqual(sum(x), math_ops.add_n(tf_x).eval())
       self.assertAllEqual(x[0] * num_inputs,
                           math_ops.add_n([tf_x[0]] * num_inputs).eval())
Developer: LongJun123456 | Project: tensorflow | Lines: 12 | Source: math_ops_test.py


Example 18: _define_maximization_operation

  def _define_maximization_operation(self, num_batches):
    """Maximization operations."""
    # TODO(xavigonzalvo): some of these operations could be moved to C++.
    # Compute the effective number of data points assigned to component k.
    with ops.control_dependencies(self._w):
      points_in_k = array_ops.squeeze(
          math_ops.add_n(self._points_in_k), axis=[0])
      # Update alpha.
      if 'w' in self._params:
        final_points_in_k = points_in_k / num_batches
        num_examples = math_ops.cast(math_ops.reduce_sum(final_points_in_k),
                                     dtypes.float32)
        self._alpha_op = self._alpha.assign(final_points_in_k /
                                            (num_examples + MEPS))
      else:
        self._alpha_op = control_flow_ops.no_op()
      self._train_ops = [self._alpha_op]

      # Update means.
      points_in_k_expanded = array_ops.reshape(points_in_k,
                                               [self._num_classes, 1, 1])
      if 'm' in self._params:
        self._means_op = self._means.assign(
            math_ops.div(
                math_ops.add_n(self._w_mul_x), points_in_k_expanded + MEPS))
      else:
        self._means_op = control_flow_ops.no_op()
      # means are (num_classes x 1 x dims)

      # Update covariances.
      with ops.control_dependencies([self._means_op]):
        b = math_ops.add_n(self._w_mul_x2) / (points_in_k_expanded + MEPS)
        new_covs = []
        for k in range(self._num_classes):
          mean = self._means.value()[k, :, :]
          square_mean = math_ops.matmul(mean, mean, transpose_a=True)
          new_cov = b[k, :, :] - square_mean + self._min_var
          if self._covariance_type == FULL_COVARIANCE:
            new_covs.append(array_ops.expand_dims(new_cov, 0))
          elif self._covariance_type == DIAG_COVARIANCE:
            new_covs.append(
                array_ops.expand_dims(array_ops.diag_part(new_cov), 0))
        new_covs = array_ops.concat(new_covs, 0)
        if 'c' in self._params:
          # Train operations don't need to take care of the means
          # because covariances already depend on it.
          with ops.control_dependencies([self._means_op, new_covs]):
            self._train_ops.append(
                state_ops.assign(
                    self._covs, new_covs, validate_shape=False))
Developer: Albert-Z-Guo | Project: tensorflow | Lines: 50 | Source: gmm_ops.py


Example 19: _testAllReduce

 def _testAllReduce(self, num_workers, num_gpus, shape, build_f):
   # Use local CPU as device for all inputs.
   num_devices = num_workers * num_gpus
   dev_list = ["/replica:0/task:0/device:CPU:0"
               for _ in range(num_devices)]
   with self.cached_session():
     input_tensors = self._buildInitialVars(shape, dev_list)
     un_op = lambda x: math_ops.div(
         x, constant_op.constant(num_devices, dtype=types_pb2.DT_FLOAT))
     simple_sum = math_ops.add_n(input_tensors)
     simple_sum.op.run()
     output_tensors = build_f(input_tensors, un_op)
     sum_reduced = math_ops.add_n(output_tensors)
     sum_reduced.op.run()
     self.assertAllClose(sum_reduced.eval(), self.evaluate(simple_sum))
Developer: Wajih-O | Project: tensorflow | Lines: 15 | Source: all_reduce_test.py


Example 20: _reduce

  def _reduce(self, aggregation, value, destinations):
    graph = ops.get_default_graph()
    cf_context = graph._get_control_flow_context()  # pylint: disable=protected-access
    # If we're inside the ReplicateContext, reduction should be done using
    # CrossReplicaSum while outside we can directly use an add_n op.
    while cf_context:
      if isinstance(cf_context, tpu.TPUReplicateContext):
        if aggregation == vs.VariableAggregation.MEAN:
          # TODO(jhseu):  Revisit once we support model-parallelism.
          value *= (1. / self.num_towers)
        return tpu_ops.cross_replica_sum(value)
      cf_context = cf_context.outer_context

    # Validate that the destination is same as the host device
    # Note we don't do this when in replicate context as the reduction is
    # performed on the TPU device itself.
    devices = cross_tower_ops_lib.get_devices_from(destinations)
    if len(devices) == 1:
      assert device_util.canonicalize(devices[0]) == device_util.canonicalize(
          self._host)
    else:
      raise ValueError('Multiple devices are not supported for TPUStrategy')

    output = math_ops.add_n(value)
    if aggregation == vs.VariableAggregation.MEAN:
      return output * (1. / len(value))
    return output
Developer: mrlittlepig | Project: tensorflow | Lines: 27 | Source: tpu_strategy.py



Note: The tensorflow.python.ops.math_ops.add_n examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors, and any distribution or use must follow the corresponding project's license. Do not reproduce without permission.

