Python math_ops.reduce_min Function Code Examples


This article collects typical usage examples of the Python function tensorflow.python.ops.math_ops.reduce_min. If you have been wondering what exactly reduce_min does, how to call it, or what it looks like in real code, the curated examples below should help.



Twenty code examples of reduce_min are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
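Before the examples, here is a minimal sketch of the op itself, shown via the public alias tf.reduce_min (which wraps the same kernel as math_ops.reduce_min); the toy tensor is our own, not taken from any example below:

import tensorflow as tf

x = tf.constant([[3.0, 1.0, 4.0],
                 [1.0, 5.0, 9.0]])

print(tf.reduce_min(x))                          # minimum over all elements -> 1.0
print(tf.reduce_min(x, axis=0))                  # column-wise minima -> [1.0, 1.0, 4.0]
print(tf.reduce_min(x, axis=1, keepdims=True))   # row minima, shape [2, 1]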

Example 1: testNegativeBinomialSample

  def testNegativeBinomialSample(self):
    with self.cached_session() as sess:
      probs = [.3, .9]
      total_count = [4., 11.]
      n = int(100e3)
      negbinom = negative_binomial.NegativeBinomial(
          total_count=total_count, probs=probs)

      samples = negbinom.sample(n, seed=12345)
      self.assertEqual([n, 2], samples.get_shape())

      sample_mean = math_ops.reduce_mean(samples, axis=0)
      sample_var = math_ops.reduce_mean(
          (samples - sample_mean[array_ops.newaxis, ...])**2., axis=0)
      sample_min = math_ops.reduce_min(samples)
      [sample_mean_, sample_var_, sample_min_] = sess.run([
          sample_mean, sample_var, sample_min])
      self.assertAllEqual(np.ones(sample_min_.shape, dtype=np.bool_),
                          sample_min_ >= 0.0)
      for i in range(2):
        self.assertAllClose(sample_mean_[i],
                            stats.nbinom.mean(total_count[i], 1 - probs[i]),
                            atol=0.,
                            rtol=.02)
        self.assertAllClose(sample_var_[i],
                            stats.nbinom.var(total_count[i], 1 - probs[i]),
                            atol=0.,
                            rtol=.02)
Developer ID: Ajaycs99, Project: tensorflow, Lines: 28, Source: negative_binomial_test.py


Example 2: grow_tree_from_stats_summaries

 def grow_tree_from_stats_summaries(stats_summary_list):
   """Updates ensemble based on the best gains from stats summaries."""
   (node_ids_per_feature, gains_list, thresholds_list,
    left_node_contribs_list, right_node_contribs_list) = (
        boosted_trees_ops.calculate_best_gains_per_feature(
            node_id_range=array_ops.stack([
                math_ops.reduce_min(node_ids),
                math_ops.reduce_max(node_ids)
            ]),
            stats_summary_list=stats_summary_list,
            l1=tree_hparams.l1,
            l2=tree_hparams.l2,
            tree_complexity=tree_hparams.tree_complexity,
            max_splits=max_splits))
   grow_op = boosted_trees_ops.update_ensemble(
       # Confirm if local_tree_ensemble or tree_ensemble should be used.
       tree_ensemble.resource_handle,
       feature_ids=math_ops.range(0, num_features, dtype=dtypes.int32),
       node_ids=node_ids_per_feature,
       gains=gains_list,
       thresholds=thresholds_list,
       left_node_contribs=left_node_contribs_list,
       right_node_contribs=right_node_contribs_list,
       learning_rate=tree_hparams.learning_rate,
       max_depth=tree_hparams.max_depth,
       pruning_mode=boosted_trees_ops.PruningMode.NO_PRUNING)
   return grow_op
Developer ID: syed-ahmed, Project: tensorflow, Lines: 27, Source: boosted_trees.py


Example 3: testGradient4

 def testGradient4(self):
   s = [2, 3, 4, 2]
   x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
   with self.test_session():
     t = ops.convert_to_tensor(x)
     su = math_ops.reduce_min(t)
     jacob_t, jacob_n = gradient_checker.compute_gradient(
         t, s, su, [1], x_init_value=x, delta=1)
   self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
Developer ID: Immexxx, Project: tensorflow, Lines: 9, Source: reduction_ops_test.py


Example 4: my_rnn

def my_rnn(alphabetEnc, cell, inputs, initial_state=None, dtype=None,
        sequence_length=None, scope=None):

  if not isinstance(cell, rnn_cell.RNNCell):
    raise TypeError("cell must be an instance of RNNCell")
  if not isinstance(inputs, list):
    raise TypeError("inputs must be a list")
  if not inputs:
    raise ValueError("inputs must not be empty")

  outputs = []
  with vs.variable_scope(scope or "RNN"):
    fixed_batch_size = inputs[0].get_shape().with_rank_at_least(1)[0]
    if fixed_batch_size.value:
      batch_size = fixed_batch_size.value
    else:
      batch_size = array_ops.shape(inputs[0])[0]
    if initial_state is not None:
      state = initial_state
    else:
      if not dtype:
        raise ValueError("If no initial_state is provided, dtype must be.")
      state = cell.zero_state(batch_size, dtype)

    if sequence_length is not None:
      sequence_length = math_ops.to_int32(sequence_length)

    if sequence_length is not None:  # Prepare variables
      zero_output = array_ops.zeros(
          array_ops.pack([batch_size, cell.output_size]), inputs[0].dtype)
      zero_output.set_shape(
          tensor_shape.TensorShape([fixed_batch_size.value, cell.output_size]))

      min_sequence_length = math_ops.reduce_min(sequence_length)
      max_sequence_length = math_ops.reduce_max(sequence_length)

    for time, input_ in enumerate(inputs):
      if time > 0: vs.get_variable_scope().reuse_variables()
      # pylint: disable=cell-var-from-loop
      call_cell = lambda: cell([input_, alphabetEnc[time]], state)
      # pylint: enable=cell-var-from-loop
      if sequence_length is not None:
        (output, state) = _rnn_step(
            time, sequence_length, min_sequence_length, max_sequence_length,
            zero_output, state, call_cell)
      else:
        (output, state) = call_cell()

      outputs.append(output)

    return (outputs, state)
Developer ID: chagge, Project: repository, Lines: 55, Source: rnn.py


Example 5: initialize_graph

  def initialize_graph(self, features, update_statistics=True):
    """Create any ops needed to provide input statistics.

    Should be called before statistics are requested.

    Args:
      features: A dictionary, the output of a `TimeSeriesInputFn` (with keys
          TrainEvalFeatures.TIMES and TrainEvalFeatures.VALUES).
      update_statistics: Whether `features` should be used to update adaptive
          statistics. Typically True for training and false for evaluation.
    Returns:
      An InputStatistics object composed of Variables, which will be updated
      based on mini-batches of data if requested.
    """
    if (TrainEvalFeatures.TIMES in features
        and TrainEvalFeatures.VALUES in features):
      times = features[TrainEvalFeatures.TIMES]
      values = features[TrainEvalFeatures.VALUES]
    else:
      # times and values may not be available, for example during prediction. We
      # still need to retrieve our variables so that they can be read from, even
      # if we're not going to update them.
      times = None
      values = None
    # Create/retrieve variables representing input statistics, initialized
    # without data to avoid deadlocking if variables are initialized before
    # queue runners are started.
    with variable_scope.variable_scope("input_statistics", use_resource=True):
      statistics = self._create_variable_statistics_object()
    with variable_scope.variable_scope(
        "input_statistics_auxiliary", use_resource=True):
      # Secondary statistics, necessary for the incremental computation of the
      # primary statistics (e.g. counts and sums for computing a mean
      # incrementally).
      auxiliary_variables = self._AdaptiveInputAuxiliaryStatistics(
          num_features=self._num_features, dtype=self._dtype)
    if update_statistics and times is not None and values is not None:
      # If we have times and values from mini-batch input, create update ops to
      # take the new data into account.
      assign_op = self._update_statistics_from_mini_batch(
          statistics, auxiliary_variables, times, values)
      with ops.control_dependencies([assign_op]):
        stat_variables = nest.pack_sequence_as(statistics, [
            array_ops.identity(tensor) for tensor in nest.flatten(statistics)
        ])
        # Since start time updates have a race condition, ensure that the
        # reported start time is at least as low as the lowest time in this
        # mini-batch. The start time should converge on the correct value
        # eventually even with the race condition, but for example state space
        # models have an assertion which could fail without this
        # post-processing.
        return stat_variables._replace(start_time=gen_math_ops.minimum(
            stat_variables.start_time, math_ops.reduce_min(times)))
    else:
      return statistics
Developer ID: AnishShah, Project: tensorflow, Lines: 55, Source: math_utils.py


Example 6: testLargeFeed

 def testLargeFeed(self):
   server = self._cached_server
   with session.Session(server.target, config=self._useRPCConfig()) as sess:
     feed_val = np.empty([10000, 3000], dtype=np.float32)
     feed_val.fill(0.5)
     p = array_ops.placeholder(dtypes.float32, shape=[10000, 3000])
     min_t = math_ops.reduce_min(p)
     max_t = math_ops.reduce_max(p)
     min_val, max_val = sess.run([min_t, max_t], feed_dict={p: feed_val})
     self.assertEqual(0.5, min_val)
     self.assertEqual(0.5, max_val)
Developer ID: JonathanRaiman, Project: tensorflow, Lines: 11, Source: server_lib_test.py


Example 7: _ones_diag

  def _ones_diag(self):
    """Returns the diagonal of this operator as all ones."""
    if self.shape.is_fully_defined():
      d_shape = self.batch_shape.concatenate(
          [min(self.domain_dimension.value, self.range_dimension.value)])
    else:
      d_shape = array_ops.concat(
          [self.batch_shape_tensor(),
           [math_ops.reduce_min(self.shape_tensor()[-2:])]], axis=0)

    return array_ops.ones(shape=d_shape, dtype=self.dtype)
Developer ID: LugarkPirog, Project: tensorflow, Lines: 11, Source: linear_operator_identity.py


Example 8: refresh_shortlist

 def refresh_shortlist():
   """Update the shortlist with the highest scores in id_to_score."""
   new_scores, new_ids = nn_ops.top_k(self.id_to_score, self.shortlist_size)
   smallest_new_score = math_ops.reduce_min(new_scores)
   new_length = math_ops.reduce_sum(
       math_ops.to_int32(math_ops.greater(new_scores, dtypes.float32.min)))
   u1 = self.sl_ids.assign(
       math_ops.to_int64(array_ops.concat([[new_length], new_ids], 0)))
   u2 = self.sl_scores.assign(
       array_ops.concat([[smallest_new_score], new_scores], 0))
   self.last_ops = [u1, u2]
   return control_flow_ops.group(u1, u2)
Developer ID: AliMiraftab, Project: tensorflow, Lines: 12, Source: topn.py


Example 9: element_to_bucket_id

    def element_to_bucket_id(*args):
      """Return int64 id of the length bucket for this element."""
      seq_length = element_length_func(*args)

      boundaries = list(bucket_boundaries)
      buckets_min = [np.iinfo(np.int32).min] + boundaries
      buckets_max = boundaries + [np.iinfo(np.int32).max]
      conditions_c = math_ops.logical_and(
          math_ops.less_equal(buckets_min, seq_length),
          math_ops.less(seq_length, buckets_max))
      bucket_id = math_ops.reduce_min(array_ops.where(conditions_c))

      return bucket_id
Developer ID: bunbutter, Project: tensorflow, Lines: 13, Source: grouping.py
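The logical_and / where / reduce_min chain above is easier to follow on concrete numbers. A NumPy sketch with hypothetical boundaries (not taken from the example) that mirrors the same logic:

import numpy as np

boundaries = [10, 20, 30]                           # hypothetical bucket_boundaries
buckets_min = [np.iinfo(np.int32).min] + boundaries
buckets_max = boundaries + [np.iinfo(np.int32).max]

def bucket_id(seq_length):
    # True exactly for the one bucket whose [min, max) interval holds seq_length.
    conditions = np.logical_and(
        np.less_equal(buckets_min, seq_length),
        np.less(seq_length, buckets_max))
    # np.where plays the role of array_ops.where; the smallest (and only)
    # matching index is the bucket id, which reduce_min extracts in the original.
    return np.min(np.where(conditions)[0])

print(bucket_id(5))    # 0: below the first boundary
print(bucket_id(10))   # 1: falls in [10, 20)
print(bucket_id(25))   # 2: falls in [20, 30)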


Example 10: func_body

 def func_body(iteration, gt_cluster_score):
   """Per each cluster, compute the average travel distance."""
   mask = math_ops.equal(labels, unique_class_ids[iteration])
   this_cluster_ids = array_ops.where(mask)
   pairwise_distances_subset = array_ops.transpose(
       array_ops.gather(
           array_ops.transpose(
               array_ops.gather(pairwise_distances, this_cluster_ids)),
           this_cluster_ids))
   this_cluster_score = -1.0 * math_ops.reduce_min(
       math_ops.reduce_sum(
           pairwise_distances_subset, axis=0))
   return iteration + 1, gt_cluster_score + this_cluster_score
Developer ID: AndrewTwinz, Project: tensorflow, Lines: 13, Source: metric_loss_ops.py


Example 11: compute_facility_energy

def compute_facility_energy(pairwise_distances, centroid_ids):
  """Compute the average travel distance to the assigned centroid.

  Args:
    pairwise_distances: 2-D Tensor of pairwise distances.
    centroid_ids: 1-D Tensor of indices.

  Returns:
    facility_energy: dtypes.float32 scalar.
  """
  return -1.0 * math_ops.reduce_sum(
      math_ops.reduce_min(
          array_ops.gather(pairwise_distances, centroid_ids), axis=0))
Developer ID: AndrewTwinz, Project: tensorflow, Lines: 13, Source: metric_loss_ops.py
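The facility energy reduces to: for every point, take the distance to its nearest chosen centroid, then sum and negate. A NumPy sketch with a made-up symmetric distance matrix:

import numpy as np

# pairwise_distances[i, j]: made-up distance between points i and j.
d = np.array([[0., 2., 9.],
              [2., 0., 5.],
              [9., 5., 0.]])
centroid_ids = [0]                                  # hypothetical single centroid

# Gather the centroid rows, take each point's min distance, sum, negate.
energy = -np.sum(np.min(d[centroid_ids], axis=0))   # -(0 + 2 + 9) = -11.0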


Example 12: _compare

 def _compare(self, x, reduction_axes, keep_dims, use_gpu=False):
   np_ans = x
   if reduction_axes is None:
     np_ans = np.amin(np_ans, keepdims=keep_dims)
   else:
     for ra in reduction_axes[::-1]:
       np_ans = np.amin(np_ans, axis=ra, keepdims=keep_dims)
   with self.test_session(use_gpu=use_gpu):
     if reduction_axes is not None:
       reduction_axes = np.array(reduction_axes).astype(np.int32)
     tf_ans = math_ops.reduce_min(x, reduction_axes, keep_dims)
     out = tf_ans.eval()
   self.assertAllClose(np_ans, out)
   self.assertShapeEqual(np_ans, tf_ans)
Developer ID: Immexxx, Project: tensorflow, Lines: 14, Source: reduction_ops_test.py


Example 13: grow_tree

  def grow_tree(self, stats_summaries_list, feature_ids_list,
                last_layer_nodes_range):
    # For not in memory situation, we need to accumulate enough of batches first
    # before proceeding with building a tree layer.
    max_splits = _get_max_splits(self._tree_hparams)

    # Prepare accumulators.
    accumulators = []
    dependencies = []
    for i, feature_ids in enumerate(feature_ids_list):
      stats_summaries = stats_summaries_list[i]
      accumulator = data_flow_ops.ConditionalAccumulator(
          dtype=dtypes.float32,
          # The stats consist of grads and hessians (the last dimension).
          shape=[len(feature_ids), max_splits, self._bucket_size_list[i], 2],
          shared_name='numeric_stats_summary_accumulator_' + str(i))
      accumulators.append(accumulator)

      apply_grad = accumulator.apply_grad(
          array_ops.stack(stats_summaries, axis=0), self._stamp_token)
      dependencies.append(apply_grad)

    # Grow the tree if enough batches is accumulated.
    with ops.control_dependencies(dependencies):
      if not self._is_chief:
        return control_flow_ops.no_op()

      min_accumulated = math_ops.reduce_min(
          array_ops.stack([acc.num_accumulated() for acc in accumulators]))

      def grow_tree_from_accumulated_summaries_fn():
        """Updates tree with the best layer from accumulated summaries."""
        # Take out the accumulated summaries from the accumulator and grow.
        stats_summaries_list = [
            array_ops.unstack(accumulator.take_grad(1), axis=0)
            for accumulator in accumulators
        ]
        grow_op = self._grow_tree_from_stats_summaries(
            stats_summaries_list, feature_ids_list, last_layer_nodes_range)
        return grow_op

      grow_model = control_flow_ops.cond(
          math_ops.greater_equal(min_accumulated, self._n_batches_per_layer),
          grow_tree_from_accumulated_summaries_fn,
          control_flow_ops.no_op,
          name='wait_until_n_batches_accumulated')
      return grow_model
Developer ID: ZhangXinNan, Project: tensorflow, Lines: 48, Source: boosted_trees.py


Example 14: _assert_non_singular

 def _assert_non_singular(self):
   """Private default implementation of _assert_non_singular."""
   logging.warn(
       "Using (possibly slow) default implementation of assert_non_singular."
       "  Requires conversion to a dense matrix and O(N^3) operations.")
   if self._can_use_cholesky():
     return self.assert_positive_definite()
   else:
     singular_values = linalg_ops.svd(self.to_dense(), compute_uv=False)
     # TODO(langmore) Add .eig and .cond as methods.
     cond = (math_ops.reduce_max(singular_values, axis=-1) /
             math_ops.reduce_min(singular_values, axis=-1))
     return check_ops.assert_less(
         cond,
         self._max_condition_number_to_be_non_singular(),
         message="Singular matrix up to precision epsilon.")
Developer ID: aritratony, Project: tensorflow, Lines: 16, Source: linear_operator.py
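The condition-number check above boils down to max(singular values) / min(singular values). A NumPy sketch on a made-up, nearly singular matrix:

import numpy as np

m = np.array([[1.0, 0.0],
              [0.0, 1e-12]])                 # nearly singular
s = np.linalg.svd(m, compute_uv=False)       # singular values, descending
cond = s.max() / s.min()                     # ~1e12: would fail any sane threshold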


Example 15: grow_not_in_mem

        def grow_not_in_mem():
          """Accumulates the data and grows a layer when ready."""

          accumulators = []
          dependencies = []
          for i, feature_ids in enumerate(feature_ids_list):
            stats_summaries = stats_summaries_list[i]
            accumulator = data_flow_ops.ConditionalAccumulator(
                dtype=dtypes.float32,
                # The stats consist of grads and hessians (the last dimension).
                shape=[len(feature_ids), max_splits, bucket_size_list[i], 2],
                shared_name='numeric_stats_summary_accumulator_' + str(i))
            accumulators.append(accumulator)

            apply_grad = accumulator.apply_grad(
                array_ops.stack(stats_summaries, axis=0), stamp_token)
            dependencies.append(apply_grad)

          def grow_tree_from_accumulated_summaries_fn():
            """Updates tree with the best layer from accumulated summaries."""
            # Take out the accumulated summaries from the accumulator and grow.
            stats_summaries_list = [
                array_ops.unstack(accumulator.take_grad(1), axis=0)
                for accumulator in accumulators
            ]

            grow_op = grow_tree_from_stats_summaries(stats_summaries_list,
                                                     feature_ids_list)
            return grow_op

          with ops.control_dependencies(dependencies):
            if config.is_chief:
              min_accumulated = math_ops.reduce_min(
                  array_ops.stack(
                      [acc.num_accumulated() for acc in accumulators]))

              grow_model = control_flow_ops.cond(
                  math_ops.greater_equal(min_accumulated, n_batches_per_layer),
                  grow_tree_from_accumulated_summaries_fn,
                  control_flow_ops.no_op,
                  name='wait_until_n_batches_accumulated')

              return grow_model
            else:
              return control_flow_ops.no_op()
Developer ID: Eagle732, Project: tensorflow, Lines: 47, Source: boosted_trees.py


Example 16: masked_minimum

def masked_minimum(data, mask, dim=1):
  """Computes the axis wise minimum over chosen elements.

  Args:
    data: 2-D float `Tensor` of size [n, m].
    mask: 2-D Boolean `Tensor` of size [n, m].
    dim: The dimension over which to compute the minimum.

  Returns:
    masked_minimums: N-D `Tensor`.
      The minimized dimension is of size 1 after the operation.
  """
  axis_maximums = math_ops.reduce_max(data, dim, keepdims=True)
  masked_minimums = math_ops.reduce_min(
      math_ops.multiply(data - axis_maximums, mask), dim,
      keepdims=True) + axis_maximums
  return masked_minimums
Developer ID: AndrewTwinz, Project: tensorflow, Lines: 17, Source: metric_loss_ops.py
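The offset trick in masked_minimum (shift each row down by its maximum so that masked-out entries, once the shift is undone, sit at the row maximum and can never win the minimum) can be checked with a NumPy sketch on made-up data:

import numpy as np

data = np.array([[3., 1., 4.],
                 [1., 5., 9.]])
mask = np.array([[1., 0., 1.],     # row 0: consider only columns 0 and 2
                 [0., 1., 1.]])    # row 1: consider only columns 1 and 2

axis_max = data.max(axis=1, keepdims=True)    # [[4.], [9.]]
# (data - axis_max) <= 0 everywhere; multiplying by the mask zeroes out
# excluded entries, which then land on the row maximum after adding it back.
masked_min = ((data - axis_max) * mask).min(axis=1, keepdims=True) + axis_max
print(masked_min)   # [[3.], [5.]]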


Example 17: _minimum_mean

def _minimum_mean(samples, envelope, low, name=None):
  """Returns a stochastic lower bound on the mean of a scalar distribution.

  The idea is that if the true CDF is within an `eps`-envelope of the
  empirical CDF of the samples, and the support is bounded below, then
  the mean is bounded below as well.  In symbols,

  ```none
  sup_x(|F_n(x) - F(x)|) < eps
  ```

  The 0th dimension of `samples` is interpreted as independent and
  identically distributed samples.  The remaining dimensions are
  broadcast together with `envelope` and `low`, and operated on
  separately.

  Args:
    samples: Floating-point tensor of samples from the distribution(s)
      of interest.  Entries are assumed IID across the 0th dimension.
      The other dimensions must broadcast with `envelope` and `low`.
    envelope: Floating-point tensor of sizes of admissible CDF
      envelopes (i.e., the `eps` above).
    low: Floating-point tensor of lower bounds on the distributions'
      supports.
    name: A name for this operation (optional).

  Returns:
    bound: Floating-point tensor of lower bounds on the true means.

  Raises:
    InvalidArgumentError: If some `sample` is found to be smaller than
      the corresponding `low`.
  """
  with ops.name_scope(name, "minimum_mean", [samples, envelope, low]):
    samples = ops.convert_to_tensor(samples, name="samples")
    envelope = ops.convert_to_tensor(envelope, name="envelope")
    low = ops.convert_to_tensor(low, name="low")

    xmin = math_ops.reduce_min(samples, axis=[-1])
    msg = "Given sample minimum value falls below expectations"
    check_op = check_ops.assert_greater_equal(xmin, low, message=msg)
    with ops.control_dependencies([check_op]):
      return - _do_maximum_mean(-samples, envelope, -low)
Developer ID: AndrewTwinz, Project: tensorflow, Lines: 43, Source: statistical_testing.py


Example 18: _MatrixSetDiagGrad

def _MatrixSetDiagGrad(op, grad):
  input_shape = op.inputs[0].get_shape().merge_with(grad.get_shape())
  diag_shape = op.inputs[1].get_shape()
  batch_shape = input_shape[:-2].merge_with(diag_shape[:-1])
  matrix_shape = input_shape[-2:]
  if batch_shape.is_fully_defined() and matrix_shape.is_fully_defined():
    diag_shape = batch_shape.as_list() + [min(matrix_shape.as_list())]
  else:
    with ops.colocate_with(grad):
      grad_shape = array_ops.shape(grad)
      grad_rank = array_ops.rank(grad)
      batch_shape = array_ops.slice(grad_shape, [0], [grad_rank - 2])
      matrix_shape = array_ops.slice(grad_shape, [grad_rank - 2], [2])
      min_dim = math_ops.reduce_min(matrix_shape)
      diag_shape = array_ops.concat([batch_shape, [min_dim]], 0)
  grad_input = array_ops.matrix_set_diag(
      grad, array_ops.zeros(
          diag_shape, dtype=grad.dtype))
  grad_diag = array_ops.matrix_diag_part(grad)
  return (grad_input, grad_diag)
Developer ID: Jackhuang945, Project: tensorflow, Lines: 20, Source: array_grad.py


Example 19: histogram

  def histogram(self, x, value_range=None, nbins=None, name=None):
    """Return histogram of values.

    Given the tensor `values`, this operation returns a rank 1 histogram
    counting the number of entries in `values` that fell into every bin. The
    bins are equal width and determined by the arguments `value_range` and
    `nbins`.

    Args:
      x: 1D numeric `Tensor` of items to count.
      value_range:  Shape [2] `Tensor`. `values <= value_range[0]` will be
        mapped to `hist[0]`, `values >= value_range[1]` will be mapped to
        `hist[-1]`. Must be same dtype as `x`.
      nbins:  Scalar `int32 Tensor`.  Number of histogram bins.
      name: Python `str` name prefixed to Ops created by this class.

    Returns:
      counts: 1D `Tensor` of counts, i.e.,
        `counts[i] = sum{ edges[i-1] <= values[j] < edges[i] : j }`.
      edges: 1D `Tensor` characterizing intervals used for counting.
    """
    with ops.name_scope(name, "histogram", [x]):
      x = ops.convert_to_tensor(x, name="x")
      if value_range is None:
        value_range = [math_ops.reduce_min(x), 1 + math_ops.reduce_max(x)]
      value_range = ops.convert_to_tensor(value_range, name="value_range")
      lo = value_range[0]
      hi = value_range[1]
      if nbins is None:
        nbins = math_ops.cast(hi - lo, dtypes.int32)
      delta = (hi - lo) / math_ops.cast(
          nbins, dtype=value_range.dtype.base_dtype)
      edges = math_ops.range(
          start=lo, limit=hi, delta=delta, dtype=x.dtype.base_dtype)
      counts = histogram_ops.histogram_fixed_width(
          x, value_range=value_range, nbins=nbins)
      return counts, edges
Developer ID: ahmedsaiduk, Project: tensorflow, Lines: 37, Source: test_util.py
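As a quick check of the binning convention described in the docstring, a sketch using the public tf.histogram_fixed_width (the toy values are ours):

import tensorflow as tf

x = tf.constant([0.0, 0.1, 0.2, 0.5, 0.9, 1.0])
counts = tf.histogram_fixed_width(x, value_range=[0.0, 1.0], nbins=3)
print(counts)   # equal-width bins [0, 1/3), [1/3, 2/3), [2/3, 1] -> [3, 1, 2]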


Example 20: _calculate_acceptance_probs_with_mixing

def _calculate_acceptance_probs_with_mixing(initial_probs, target_probs):
  """Calculates the acceptance probabilities and mixing ratio.

  In this case, we assume that we can *either* sample from the original data
  distribution with probability `m`, or sample from a reshaped distribution
  that comes from rejection sampling on the original distribution. This
  rejection sampling is done on a per-class basis, with `a_i` representing the
  probability of accepting data from class `i`.

  This method is based on solving the following analysis for the reshaped
  distribution:

  Let F be the probability of a rejection (on any example).
  Let p_i be the proportion of examples in the data in class i (init_probs)
  Let a_i be the rate at which the rejection sampler should *accept* class i
  Let t_i be the target proportion in the minibatches for class i (target_probs)

  ```
  F = sum_i(p_i * (1-a_i))
    = 1 - sum_i(p_i * a_i)     using sum_i(p_i) = 1
  ```

  An example with class `i` will be accepted if `k` rejections occur, then an
  example with class `i` is seen by the rejector, and it is accepted. This can
  be written as follows:

  ```
  t_i = sum_k=0^inf(F^k * p_i * a_i)
      = p_i * a_i / (1 - F)    using geometric series identity, since 0 <= F < 1
      = p_i * a_i / sum_j(p_j * a_j)        using F from above
  ```

  Note that the following constraints hold:
  ```
  0 <= p_i <= 1, sum_i(p_i) = 1
  0 <= a_i <= 1
  0 <= t_i <= 1, sum_i(t_i) = 1
  ```

  A solution for a_i in terms of the other variables is the following:
    ```a_i = (t_i / p_i) / max_i[t_i / p_i]```

  If we try to minimize the amount of data rejected, we get the following:

  M_max = max_i [ t_i / p_i ]
  M_min = min_i [ t_i / p_i ]

  The desired probability of accepting data if it comes from class `i`:

  a_i = (t_i/p_i - m) / (M_max - m)

  The desired probability of pulling a data element from the original dataset,
  rather than the filtered one:

  m = M_min

  Args:
    initial_probs: A Tensor of the initial probability distribution, given or
      estimated.
    target_probs: A Tensor of the target probability distribution.

  Returns:
    (A 1D Tensor with the per-class acceptance probabilities, the desired
    probability of pull from the original distribution.)
  """
  ratio_l = _get_target_to_initial_ratio(initial_probs, target_probs)
  max_ratio = math_ops.reduce_max(ratio_l)
  min_ratio = math_ops.reduce_min(ratio_l)

  # Target prob to sample from original distribution.
  m = min_ratio

  # TODO(joelshor): Simplify fraction, if possible.
  a_i = (ratio_l - m) / (max_ratio - m)
  return a_i, m
Developer ID: AnishShah, Project: tensorflow, Lines: 75, Source: resampling.py
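The closed-form solution is easy to verify numerically. A NumPy sketch with made-up distributions, computing the ratio t_i / p_i directly in place of _get_target_to_initial_ratio:

import numpy as np

init_probs   = np.array([0.5, 0.3, 0.2])      # p_i (hypothetical)
target_probs = np.array([1/3, 1/3, 1/3])      # t_i (uniform target)

ratio = target_probs / init_probs             # t_i / p_i
m = ratio.min()                               # prob. of sampling the original data
a = (ratio - m) / (ratio.max() - m)           # per-class acceptance probabilities

print(m)   # ~0.667
print(a)   # [0.    0.444 1.   ]: class 0 only arrives via the original-data mix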



Note: the tensorflow.python.ops.math_ops.reduce_min examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are drawn from open-source projects contributed by their respective developers; copyright remains with the original authors, and redistribution and use are governed by each project's license. Please do not reproduce without permission.

