Python math_ops.reduce_prod Function Code Examples


This article collects typical usage examples of the Python function tensorflow.python.ops.math_ops.reduce_prod. If you are wondering what reduce_prod does, how to call it, or what real-world uses look like, the curated code examples below should help.



The following presents 20 code examples of reduce_prod, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python examples.
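As a quick orientation before the examples: reduce_prod multiplies tensor elements along the given axes, analogous to np.prod. A minimal sketch, assuming the public TF 1.x API to match the session-style snippets below:

import tensorflow as tf

x = tf.constant([[1., 2., 3.],
                 [4., 5., 6.]])
total = tf.reduce_prod(x)               # product of all six elements -> 720.0
per_column = tf.reduce_prod(x, axis=0)  # column-wise products -> [4., 10., 18.]

with tf.Session() as sess:
  print(sess.run([total, per_column]))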

Example 1: calculate_reshape

def calculate_reshape(original_shape, new_shape, validate=False, name=None):
  """Calculates the reshaped dimensions (replacing up to one -1 in reshape)."""
  batch_shape_static = tensor_util.constant_value_as_shape(new_shape)
  if batch_shape_static.is_fully_defined():
    return np.int32(batch_shape_static.as_list()), batch_shape_static, []
  with ops.name_scope(name, "calculate_reshape", [original_shape, new_shape]):
    original_size = math_ops.reduce_prod(original_shape)
    implicit_dim = math_ops.equal(new_shape, -1)
    size_implicit_dim = (
        original_size // math_ops.maximum(1, -math_ops.reduce_prod(new_shape)))
    new_ndims = array_ops.shape(new_shape)
    expanded_new_shape = array_ops.where(  # Assumes exactly one `-1`.
        implicit_dim, array_ops.fill(new_ndims, size_implicit_dim), new_shape)
    validations = [] if not validate else [
        check_ops.assert_rank(
            original_shape, 1, message="Original shape must be a vector."),
        check_ops.assert_rank(
            new_shape, 1, message="New shape must be a vector."),
        check_ops.assert_less_equal(
            math_ops.count_nonzero(implicit_dim, dtype=dtypes.int32),
            1,
            message="At most one dimension can be unknown."),
        check_ops.assert_positive(
            expanded_new_shape, message="Shape elements must be >=-1."),
        check_ops.assert_equal(
            math_ops.reduce_prod(expanded_new_shape),
            original_size,
            message="Shape sizes do not match."),
    ]
    return expanded_new_shape, batch_shape_static, validations
Developer: Ajaycs99, Project: tensorflow, Lines: 30, Source: batch_reshape.py
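The arithmetic behind the implicit `-1` dimension is easy to check outside the graph. A minimal NumPy sketch of the same computation (the shape values here are hypothetical):

import numpy as np

original_shape = np.array([2, 3, 4])   # 24 elements in total
new_shape = np.array([-1, 4])
# reduce_prod(new_shape) is -4, so its negation recovers the known size, 4.
size_implicit_dim = np.prod(original_shape) // max(1, -np.prod(new_shape))
expanded_new_shape = np.where(new_shape == -1, size_implicit_dim, new_shape)
# expanded_new_shape is [6, 4]; 6 * 4 matches the original 24 elements.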


Example 2: _MeanGrad

def _MeanGrad(op, grad):
    """Gradient for Mean."""
    sum_grad = _SumGrad(op, grad)[0]
    input_shape = array_ops.shape(op.inputs[0])
    output_shape = array_ops.shape(op.outputs[0])
    factor = _safe_shape_div(math_ops.reduce_prod(input_shape), math_ops.reduce_prod(output_shape))
    return sum_grad / math_ops.cast(factor, sum_grad.dtype), None
Developer: ChanningPing, Project: tensorflow, Lines: 7, Source: math_grad.py
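The factor here is simply the number of input elements that contributed to each mean, computed as a ratio of element counts so it also works when shapes are only known at run time. A small NumPy check, with shapes chosen for illustration:

import numpy as np

input_shape = np.array([2, 3])
output_shape = np.array([3])   # result of a mean over axis 0
# prod(input) / prod(output) = 6 / 3 = 2 elements averaged per output value,
# so each input element receives upstream_grad / 2.
factor = np.prod(input_shape) // np.prod(output_shape)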


Example 3: test_docstring_example

  def test_docstring_example(self):
    # Produce the first 1000 members of the Halton sequence in 3 dimensions.
    num_results = 1000
    dim = 3
    with self.test_session():
      sample = halton.sample(dim, num_results=num_results, randomized=False)

      # Evaluate the integral of x_1 * x_2^2 * x_3^3  over the three dimensional
      # hypercube.
      powers = math_ops.range(1.0, limit=dim + 1)
      integral = math_ops.reduce_mean(
          math_ops.reduce_prod(sample ** powers, axis=-1))
      true_value = 1.0 / math_ops.reduce_prod(powers + 1.0)

      # Produces a relative absolute error of 1.7%.
      self.assertAllClose(integral.eval(), true_value.eval(), rtol=0.02)

      # Now skip the first 1000 samples and recompute the integral with the next
      # thousand samples. The sequence_indices argument can be used to do this.

      sequence_indices = math_ops.range(start=1000, limit=1000 + num_results,
                                        dtype=dtypes.int32)
      sample_leaped = halton.sample(dim, sequence_indices=sequence_indices,
                                    randomized=False)

      integral_leaped = math_ops.reduce_mean(
          math_ops.reduce_prod(sample_leaped ** powers, axis=-1))
      self.assertAllClose(integral_leaped.eval(), true_value.eval(), rtol=0.05)
Developer: QiangCai, Project: tensorflow, Lines: 28, Source: halton_sequence_test.py
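The closed-form value used by the test follows from integrating each coordinate independently: the integral of x**p over [0, 1] is 1/(p+1), so the product integral is 1/prod(powers + 1) = 1/24. A plain-NumPy Monte Carlo version of the same check, using pseudo-random rather than Halton points:

import numpy as np

powers = np.array([1.0, 2.0, 3.0])
true_value = 1.0 / np.prod(powers + 1.0)   # 1/(2*3*4) ~= 0.0417

sample = np.random.uniform(size=(1000, 3))
estimate = np.mean(np.prod(sample ** powers, axis=-1))
# estimate converges to true_value, though more slowly than the Halton sequence.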


Example 4: validate_init_args

def validate_init_args(
    distribution,
    batch_shape,
    validate_args,
    batch_shape_static):
  """Helper to __init__ which makes or raises assertions."""
  with ops.name_scope(name="validate_init_args",
                      values=[batch_shape] + distribution._graph_parents):  # pylint: disable=protected-access
    runtime_assertions = []

    if batch_shape.shape.ndims is not None:
      if batch_shape.shape.ndims != 1:
        raise ValueError("`batch_shape` must be a vector "
                         "(saw rank: {}).".format(
                             batch_shape.shape.ndims))
    elif validate_args:
      runtime_assertions += [
          check_ops.assert_rank(
              batch_shape,
              1,
              message="`batch_shape` must be a vector.",
              name="assert_batch_shape_is_vector"),
      ]

    batch_size_static = np.prod(batch_shape_static)
    dist_batch_size_static = (
        None if not distribution.batch_shape.is_fully_defined()
        else np.prod(distribution.batch_shape).value)

    if batch_size_static is not None and dist_batch_size_static is not None:
      if batch_size_static != dist_batch_size_static:
        raise ValueError("`batch_shape` size ({}) must match "
                         "`distribution.batch_shape` size ({}).".format(
                             batch_size_static,
                             dist_batch_size_static))
    elif validate_args:
      runtime_assertions += [
          check_ops.assert_equal(
              math_ops.reduce_prod(batch_shape),
              math_ops.reduce_prod(distribution.batch_shape_tensor()),
              message=("`batch_shape` size must match "
                       "`distributions.batch_shape` size."),
              name="assert_batch_size"),
      ]

    if batch_shape_static is not None:
      if np.any(batch_shape_static < 1):
        raise ValueError("`batch_shape` elements must be positive "
                         "(i.e., larger than zero).")
    elif validate_args:
      runtime_assertions += [
          check_ops.assert_positive(
              batch_shape,
              message=("`batch_shape` elements must be positive "
                       "(i.e., larger than zero)."),
              name="assert_batch_shape_positive")
      ]

    return runtime_assertions
Developer: Jackiefan, Project: tensorflow, Lines: 59, Source: batch_reshape.py


Example 5: _MeanGrad

def _MeanGrad(op, grad):
  """Gradient for Mean."""
  sum_grad = _SumGrad(op, grad)[0]
  input_shape = array_ops.shape(op.inputs[0])
  output_shape = array_ops.shape(op.outputs[0])
  # TODO(apassos) remove this device hackery as eager copy to device becomes
  # more seamless.
  with ops.colocate_with(input_shape):
    factor = _safe_shape_div(
        math_ops.reduce_prod(input_shape), math_ops.reduce_prod(output_shape))
  if context.in_eager_mode():
    factor = factor._copy(device_name=sum_grad.device)  # pylint: disable=protected-access
  return sum_grad / math_ops.cast(factor, sum_grad.dtype), None
Developer: keveman, Project: tensorflow, Lines: 13, Source: math_grad.py


Example 6: _MeanGrad

def _MeanGrad(op, grad):
  """Gradient for Mean."""
  sum_grad = _SumGrad(op, grad)[0]
  input_size = op.inputs[0].get_shape().num_elements()
  output_size = op.outputs[0].get_shape().num_elements()
  if input_size is not None and output_size is not None:
    factor = input_size // max(output_size, 1)
    factor = constant_op.constant(factor, dtype=sum_grad.dtype)
  else:
    input_shape = array_ops.shape(op.inputs[0])
    output_shape = array_ops.shape(op.outputs[0])
    factor = _safe_shape_div(
        math_ops.reduce_prod(input_shape), math_ops.reduce_prod(output_shape))
  return sum_grad / math_ops.cast(factor, sum_grad.dtype), None
Developer: andrewharp, Project: tensorflow, Lines: 14, Source: math_grad.py


Example 7: _sample_n

  def _sample_n(self, n, seed=None):
    # Get ids as a [n, batch_size]-shaped matrix, unless batch_shape=[] then get
    # ids as a [n]-shaped vector.
    batch_size = (np.prod(self.batch_shape.as_list(), dtype=np.int32)
                  if self.batch_shape.is_fully_defined()
                  else math_ops.reduce_prod(self.batch_shape_tensor()))
    ids = self._mixture_distribution.sample(
        sample_shape=concat_vectors(
            [n],
            distribution_util.pick_vector(
                self.is_scalar_batch(),
                np.int32([]),
                [batch_size])),
        seed=distribution_util.gen_new_seed(
            seed, "poisson_lognormal_quadrature_compound"))
    # Stride `quadrature_size` for `batch_size` number of times.
    offset = math_ops.range(start=0,
                            limit=batch_size * self._quadrature_size,
                            delta=self._quadrature_size,
                            dtype=ids.dtype)
    ids += offset
    rate = array_ops.gather(
        array_ops.reshape(self.distribution.rate, shape=[-1]), ids)
    rate = array_ops.reshape(
        rate, shape=concat_vectors([n], self.batch_shape_tensor()))
    return random_ops.random_poisson(
        lam=rate, shape=[], dtype=self.dtype, seed=seed)
Developer: Kongsea, Project: tensorflow, Lines: 27, Source: poisson_lognormal.py


Example 8: _flip_vector_to_matrix_dynamic

def _flip_vector_to_matrix_dynamic(vec, batch_shape):
  """flip_vector_to_matrix with dynamic shapes."""
  # Shapes associated with batch_shape
  batch_rank = array_ops.size(batch_shape)

  # Shapes associated with vec.
  vec = ops.convert_to_tensor(vec, name="vec")
  vec_shape = array_ops.shape(vec)
  vec_rank = array_ops.rank(vec)
  vec_batch_rank = vec_rank - 1

  m = vec_batch_rank - batch_rank
  # vec_shape_left = [M1,...,Mm] or [].
  vec_shape_left = array_ops.slice(vec_shape, [0], [m])
  # If vec_shape_left = [], then condensed_shape = [1] since reduce_prod([]) = 1
  # If vec_shape_left = [M1,...,Mm], condensed_shape = [M1*...*Mm]
  condensed_shape = [math_ops.reduce_prod(vec_shape_left)]
  k = array_ops.gather(vec_shape, vec_rank - 1)
  new_shape = array_ops.concat(0, (batch_shape, [k], condensed_shape))

  def _flip_front_dims_to_back():
    # Permutation corresponding to [N1,...,Nn] + [k, M1,...,Mm]
    perm = array_ops.concat(
        0, (math_ops.range(m, vec_rank), math_ops.range(0, m)))
    return array_ops.transpose(vec, perm=perm)

  x_flipped = control_flow_ops.cond(
      math_ops.less(0, m),
      _flip_front_dims_to_back,
      lambda: array_ops.expand_dims(vec, -1))

  return array_ops.reshape(x_flipped, new_shape)
Developer: ComeOnGetMe, Project: tensorflow, Lines: 32, Source: operator_pd.py


Example 9: testDegenerate

  def testDegenerate(self):
    with self.test_session(use_gpu=True):
      for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
        # A large number is needed to get Eigen to die.
        x = array_ops.zeros((0, 9938), dtype=dtype)
        y = math_ops.reduce_prod(x, [0])
        self.assertAllEqual(y.eval(), np.ones(9938))
Developer: ThunderQi, Project: tensorflow, Lines: 7, Source: reduction_ops_test.py
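The expected behavior being tested is the usual convention for an empty product: reducing over a dimension of size 0 yields the multiplicative identity, 1. The same convention holds in NumPy:

import numpy as np

x = np.zeros((0, 9938))
y = np.prod(x, axis=0)           # product over an empty axis
assert y.shape == (9938,) and np.all(y == 1.0)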


Example 10: test_tensor_array_grad

  def test_tensor_array_grad(self):
    inp = constant_op.constant(np.random.rand(3, 4, 2), dtype=dtypes.float32)
    ta = tensor_array_ops.TensorArray(dtypes.float32, size=3)
    ta = ta.unstack(inp)

    def loop_fn(i):

      def body(j, x):
        value = ta.gather([j])
        value = array_ops.gather(array_ops.reshape(value, [4, 2]), i)
        return j + 1, x + value

      _, out = control_flow_ops.while_loop(lambda j, _: j < 3, body,
                                           (0, array_ops.zeros([2])))
      out = math_ops.reduce_prod(out)
      return out, gradient_ops.gradients(out, inp)[0]

    pfor_out, pfor_out_grad = pfor_control_flow_ops.pfor(loop_fn, 4)
    # Note that tf.while_loop does not work in the setup above. So we manually
    # construct the equivalent computation of the above loops here.
    real_out = math_ops.reduce_sum(inp, axis=[0])
    real_out = math_ops.reduce_prod(real_out, axis=[1])
    # Note that gradients of real_out will accumulate the gradients across the
    # output value. Hence we do the same aggregation on pfor_out_grad.
    real_out_grad = gradient_ops.gradients(real_out, inp)[0]
    sum_pfor_out_grad = math_ops.reduce_sum(pfor_out_grad, axis=[0])

    with session.Session() as sess:
      v1, v2, v1_grad, v2_grad = sess.run(
          [pfor_out, real_out, sum_pfor_out_grad, real_out_grad])
      self.assertAllClose(v1, v2)
      self.assertAllClose(v1_grad, v2_grad)
Developer: aritratony, Project: tensorflow, Lines: 32, Source: control_flow_ops_test.py


Example 11: _unblockify_then_matricize

  def _unblockify_then_matricize(self, vec):
    """Flatten the block dimensions then reshape to a batch matrix."""
    # Suppose
    #   vec.shape = [v0, v1, v2, v3],
    #   self.block_depth = 2.
    # Then
    #   leading shape = [v0, v1]
    #   block shape = [v2, v3].
    # We will reshape vec to
    #   [v1, v2*v3, v0].

    # Un-blockify: Flatten block dimensions.  Reshape
    #   [v0, v1, v2, v3] --> [v0, v1, v2*v3].
    if vec.get_shape().is_fully_defined():
      # vec_shape = [v0, v1, v2, v3]
      vec_shape = vec.get_shape().as_list()
      # vec_leading_shape = [v0, v1]
      vec_leading_shape = vec_shape[:-self.block_depth]
      # vec_block_shape = [v2, v3]
      vec_block_shape = vec_shape[-self.block_depth:]
      # flat_shape = [v0, v1, v2*v3]
      flat_shape = vec_leading_shape + [np.prod(vec_block_shape)]
    else:
      vec_shape = array_ops.shape(vec)
      vec_leading_shape = vec_shape[:-self.block_depth]
      vec_block_shape = vec_shape[-self.block_depth:]
      flat_shape = array_ops.concat(
          (vec_leading_shape, [math_ops.reduce_prod(vec_block_shape)]), 0)
    vec_flat = array_ops.reshape(vec, flat_shape)

    # Matricize:  Reshape to batch matrix.
    #   [v0, v1, v2*v3] --> [v1, v2*v3, v0],
    # representing a shape [v1] batch of [v2*v3, v0] matrices.
    matrix = distribution_util.rotate_transpose(vec_flat, shift=-1)
    return matrix
Developer: JonathanRaiman, Project: tensorflow, Lines: 35, Source: linear_operator_circulant.py
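The reshape-then-rotate sequence is easier to see on concrete shapes. A NumPy sketch with hypothetical dimensions; rotate_transpose with shift=-1 corresponds to moving the leading axis to the end:

import numpy as np

vec = np.arange(2 * 3 * 4 * 5).reshape(2, 3, 4, 5)  # [v0, v1, v2, v3]
flat = vec.reshape(2, 3, 4 * 5)                      # [v0, v1, v2*v3]
matrix = np.moveaxis(flat, 0, -1)                    # [v1, v2*v3, v0] == (3, 20, 2)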


Example 12: sequences_loss

def sequences_loss(logits, targets, weights, num_decoders,
                   average_across_timesteps=True, average_across_batch=True,
                   softmax_loss_function=None, name=None):
  """Product of weighted cross-entropy losses for sequences of logits, batch-collapsed.

  Args:
    logits: List of size num_decoders of lists of 2D Tensors of shape
      [batch_size x num_decoder_symbols].
    targets: Lists of 1D batch-sized int32 Tensors of the same lengths as logits.
    weights: Lists of 1D batch-sized float Tensors of the same lengths as logits.
    average_across_timesteps: If set, divide the returned cost by the total
      label weight.
    average_across_batch: If set, divide the returned cost by the batch size.
    softmax_loss_function: Function (inputs-batch, labels-batch) -> loss-batch
      to be used instead of the standard softmax (the default if this is None).
    name: Optional name for this operation, defaults to "sequence_loss".

  Returns:
    A scalar float Tensor: the product of average log-perplexities per symbol
    (weighted).

  Raises:
    ValueError: If len(logits) differs from len(targets) or num_decoders.
  """
  if len(targets) != len(logits) or num_decoders != len(logits):
    raise ValueError("Lengths of logits and targets must be %d, not %d, %d." %
                     (num_decoders, len(logits), len(targets)))
  losses = []
  for i in xrange(num_decoders):
    losses.append(tf.nn.seq2seq.sequence_loss(
        logits[i], targets[i], weights[i], average_across_timesteps,
        average_across_batch, softmax_loss_function, name))
  return math_ops.reduce_prod(losses)
Developer: Sephora-M, Project: chord2vec, Lines: 30, Source: seq2seqs.py


Example 13: per_step_batch_loss

  def per_step_batch_loss(self, features, mode, state):
    """Computes predictions, losses, and intermediate model states.

    Args:
      features: A dictionary with times, values, and (optionally) exogenous
          regressors. See `define_loss`.
      mode: The tf.estimator.ModeKeys mode to use (TRAIN, EVAL, INFER).
      state: Model-dependent state, each with size [batch size x ...]. The
          number and type will typically be fixed by the model (for example a
          mean and variance).
    Returns:
      A tuple of (loss, filtered_states, predictions)
        loss: Average loss values across the batch.
        filtered_states: For each Tensor in `state` with shape [batch size x
            ...], `filtered_states` has a Tensor with shape [batch size x window
            size x ...] with filtered state for each part of the batch and
            window.
        predictions: A dictionary with model-dependent one-step-ahead (or
            at-least-one-step-ahead with missing values) predictions, with keys
            indicating the type of prediction and values having shape [batch
            size x window size x ...]. For example state space models provide
            "mean", "covariance", and "log_likelihood".

    """
    self._check_graph_initialized()
    times = math_ops.cast(features[TrainEvalFeatures.TIMES], dtype=dtypes.int64)
    values = math_ops.cast(features[TrainEvalFeatures.VALUES], dtype=self.dtype)
    exogenous_regressors = self._process_exogenous_features(
        times=times,
        features={key: value for key, value in features.items()
                  if key not in [TrainEvalFeatures.TIMES,
                                 TrainEvalFeatures.VALUES]})
    def _batch_loss_filtering_step(step_number, current_times, state):
      """Make a prediction and update it based on data."""
      current_values = values[:, step_number, :]
      state = self._apply_exogenous_update(
          step_number=step_number, current_times=current_times, state=state,
          raw_features=features,
          embedded_exogenous_regressors=exogenous_regressors)
      predicted_state, predictions = self._prediction_step(
          current_times=current_times,
          state=state)
      filtered_state, outputs = self._filtering_step(
          current_times=current_times,
          current_values=current_values,
          state=predicted_state,
          predictions=predictions)
      return filtered_state, outputs
    state, outputs = self._state_update_loop(
        times=times, state=state, state_update_fn=_batch_loss_filtering_step,
        outputs=["loss"] + self._train_output_names)
    outputs["loss"].set_shape(times.get_shape())
    loss_sum = math_ops.reduce_sum(outputs["loss"])
    per_observation_loss = (loss_sum / math_ops.cast(
        math_ops.reduce_prod(array_ops.shape(times)), dtype=self.dtype))
    per_observation_loss += self._loss_additions(times, values, mode)
    # Since we have window-level additions to the loss, its per-step value is
    # misleading, so we avoid returning it.
    del outputs["loss"]
    return per_observation_loss, state, outputs
Developer: AutumnQYN, Project: tensorflow, Lines: 60, Source: model.py


Example 14: embedding_lookup_unique

def embedding_lookup_unique(params, ids, name=None):
  """Version of embedding_lookup that avoids duplicate lookups.

  This can save communication in the case of repeated ids.
  Same interface as embedding_lookup. Except it supports multi-dimensional `ids`
  which allows to not reshape input/output to fit gather.

  Args:
    params: A list of tensors with the same shape and type, or a
      `PartitionedVariable`. Shape `[index, d1, d2, ...]`.
    ids: A one-dimensional `Tensor` with type `int32` or `int64` containing
      the ids to be looked up in `params`. Shape `[ids1, ids2, ...]`.
    name: A name for this operation (optional).

  Returns:
    A `Tensor` with the same type as the tensors in `params` and dimension of
    `[ids1, ids2, d1, d2, ...]`.

  Raises:
    ValueError: If `params` is empty.
  """
  with ops.name_scope(name, "EmbeddingLookupUnique", [params, ids]):
    ids = ops.convert_to_tensor(ids)
    shape = array_ops.shape(ids)
    ids_flat = array_ops.reshape(
        ids, math_ops.reduce_prod(shape, keep_dims=True))
    unique_ids, idx = array_ops.unique(ids_flat)
    unique_embeddings = embedding_ops.embedding_lookup(params, unique_ids)
    embeds_flat = array_ops.gather(unique_embeddings, idx)
    embed_shape = array_ops.concat(
        [shape, array_ops.shape(unique_embeddings)[1:]], 0)
    embeds = array_ops.reshape(embeds_flat, embed_shape)
    embeds.set_shape(ids.get_shape().concatenate(
        unique_embeddings.get_shape()[1:]))
    return embeds
Developer: 1000sprites, Project: tensorflow, Lines: 35, Source: embedding_ops.py
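The dedup-then-gather idea can be mimicked in NumPy, where np.unique plays the role of array_ops.unique (the shapes below are hypothetical):

import numpy as np

params = np.random.rand(10, 4)             # 10 embedding rows of width 4
ids = np.array([[3, 1, 3],
                [1, 0, 3]])                # repeated ids
unique_ids, idx = np.unique(ids.reshape(-1), return_inverse=True)
unique_embeddings = params[unique_ids]     # only 3 rows looked up, not 6
embeds = unique_embeddings[idx].reshape(ids.shape + (4,))  # shape (2, 3, 4)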


Example 15: _expand_sample_shape_to_vector

  def _expand_sample_shape_to_vector(self, x, name):
    """Helper to `sample` which ensures input is 1D."""
    x_static_val = tensor_util.constant_value(x)
    if x_static_val is None:
      prod = math_ops.reduce_prod(x)
    else:
      prod = np.prod(x_static_val, dtype=x.dtype.as_numpy_dtype())

    ndims = x.get_shape().ndims  # != sample_ndims
    if ndims is None:
      # Maybe expand_dims.
      ndims = array_ops.rank(x)
      expanded_shape = util.pick_vector(
          math_ops.equal(ndims, 0),
          np.array([1], dtype=np.int32), array_ops.shape(x))
      x = array_ops.reshape(x, expanded_shape)
    elif ndims == 0:
      # Definitely expand_dims.
      if x_static_val is not None:
        x = ops.convert_to_tensor(
            np.array([x_static_val], dtype=x.dtype.as_numpy_dtype()),
            name=name)
      else:
        x = array_ops.reshape(x, [1])
    elif ndims != 1:
      raise ValueError("Input is neither scalar nor vector.")

    return x, prod
Developer: omoindrot, Project: tensorflow, Lines: 28, Source: distribution.py


Example 16: sample

  def sample(self, sample_shape=(), seed=None, name="sample"):
    """Generate samples of the specified shape.

    Note that a call to `sample()` without arguments will generate a single
    sample.

    Args:
      sample_shape: Rank 1 `int32` `Tensor`. Shape of the generated samples.
      seed: Python integer seed for RNG
      name: name to give to the op.

    Returns:
      samples: a `Tensor` with prepended dimensions `sample_shape`.
    """
    with ops.name_scope(self.name):
      with ops.name_scope(name, values=[sample_shape]):
        sample_shape = ops.convert_to_tensor(sample_shape,
                                             dtype=dtypes.int32,
                                             name="sample_shape")
        total = math_ops.reduce_prod(sample_shape)
        samples = self.sample_n(total, seed)
        output_shape = array_ops.concat(0, [sample_shape, array_ops.slice(
            array_ops.shape(samples), [1], [-1])])
        output = array_ops.reshape(samples, output_shape, name=name)
        output.set_shape(tensor_util.constant_value_as_shape(
            sample_shape).concatenate(samples.get_shape()[1:]))
    return output
Developer: abhishekns, Project: tensorflow, Lines: 27, Source: distribution.py
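The pattern is: collapse the requested sample shape to a single count with reduce_prod, draw that many samples in one call, then reshape the leading dimension back. In NumPy terms, with a hypothetical event shape of [4]:

import numpy as np

sample_shape = (2, 3)
total = int(np.prod(sample_shape))           # 6 samples in one call
flat = np.random.normal(size=(total, 4))     # hypothetical event shape [4]
samples = flat.reshape(sample_shape + (4,))  # prepended dims -> (2, 3, 4)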


Example 17: _entropy

  def _entropy(self):
    if (not self.distribution.is_continuous or
        not self.bijector.is_constant_jacobian):
      raise NotImplementedError("entropy is not implemented")
    # Suppose Y = g(X) where g is a diffeomorphism and X is a continuous rv. It
    # can be shown that:
    #   H[Y] = H[X] + E_X[(log o abs o det o J o g)(X)].
    # If is_constant_jacobian then:
    #   E_X[(log o abs o det o J o g)(X)] = (log o abs o det o J o g)(c)
    # where c can be anything.
    entropy = self.distribution.entropy()
    if self._is_maybe_event_override:
      # H[X] = sum_i H[X_i] if X_i are mutually independent.
      # This means that a reduce_sum is a simple rescaling.
      entropy *= math_ops.cast(math_ops.reduce_prod(self._override_event_shape),
                               dtype=entropy.dtype.base_dtype)
    if self._is_maybe_batch_override:
      new_shape = array_ops.concat([
          _ones_like(self._override_batch_shape),
          self.distribution.batch_shape_tensor()
      ], 0)
      entropy = array_ops.reshape(entropy, new_shape)
      multiples = array_ops.concat([
          self._override_batch_shape,
          _ones_like(self.distribution.batch_shape_tensor())
      ], 0)
      entropy = array_ops.tile(entropy, multiples)
    dummy = array_ops.zeros([], self.dtype)
    entropy -= self.bijector.inverse_log_det_jacobian(dummy)
    entropy.set_shape(self.batch_shape)
    return entropy
Developer: arnonhongklay, Project: tensorflow, Lines: 31, Source: transformed_distribution.py


Example 18: _determinant_from_sigma_chol

def _determinant_from_sigma_chol(sigma_chol):
  det_last_dim = array_ops.rank(sigma_chol) - 2
  sigma_batch_diag = array_ops.batch_matrix_diag_part(sigma_chol)
  det = math_ops.square(math_ops.reduce_prod(
      sigma_batch_diag, reduction_indices=det_last_dim))
  det.set_shape(sigma_chol.get_shape()[:-2])
  return det
Developer: 0-T-0, Project: tensorflow, Lines: 7, Source: mvn.py
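This relies on the identity det(Sigma) = det(L)^2 = (prod of diag(L))^2 when Sigma = L L^T is a Cholesky factorization, since the determinant of a triangular matrix is the product of its diagonal. A NumPy check:

import numpy as np

sigma = np.array([[4., 2.],
                  [2., 3.]])
chol = np.linalg.cholesky(sigma)
det_from_chol = np.prod(np.diag(chol)) ** 2
assert np.isclose(det_from_chol, np.linalg.det(sigma))   # both equal 8.0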


Example 19: _TopKGrad

def _TopKGrad(op, grad, _):
  """Return the gradients for TopK.

  Args:
    op: The TopKOp for which we need to generate gradients.
    grad: Tensor. The gradients passed to the TopKOp.

  Returns:
    A list of two tensors, the first being the gradient w.r.t to the input and
    TopK, and the second being the gradient w.r.t. to the indices (all zero).
  """
  in_shape = array_ops.shape(op.inputs[0])
  ind_shape = array_ops.shape(op.outputs[1])

  ind_lastdim = array_ops.gather(ind_shape, array_ops.size(ind_shape) - 1)
  # Flatten indices to 2D.
  ind_2d = array_ops.reshape(op.outputs[1], array_ops.stack([-1, ind_lastdim]))

  in_lastdim = array_ops.gather(in_shape, array_ops.size(in_shape) - 1)
  outerdim = array_ops.shape(ind_2d)[0]
  # Compute linear indices (flattened to 1D).
  ind = array_ops.reshape(ind_2d + array_ops.expand_dims(
      math_ops.range(0, outerdim * in_lastdim, in_lastdim), -1), [-1])

  # Substitute grad to appropriate locations and fill the rest with zeros,
  # finally reshaping it to the original input shape.
  return [array_ops.reshape(
      sparse_ops.sparse_to_dense(ind,
                                 array_ops.reshape(
                                     math_ops.reduce_prod(in_shape), [1]),
                                 array_ops.reshape(grad, [-1]),
                                 validate_indices=False),
      in_shape), array_ops.zeros(
          [], dtype=dtypes.int32)]
Developer: Jackhuang945, Project: tensorflow, Lines: 34, Source: nn_grad.py
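The core trick is converting per-row TopK indices into linear indices over the flattened input (of size reduce_prod(in_shape)) and scattering the incoming gradient there, leaving zeros elsewhere. A NumPy sketch with hypothetical values:

import numpy as np

in_shape = np.array([2, 4])                 # two rows of four values
indices = np.array([[3, 1],                 # top-2 indices per row
                    [0, 2]])
grad = np.array([[10., 20.],
                 [30., 40.]])               # upstream grads for the top-2
lin = (indices + np.arange(2)[:, None] * in_shape[1]).reshape(-1)
flat = np.zeros(int(np.prod(in_shape)))
flat[lin] = grad.reshape(-1)
input_grad = flat.reshape(tuple(in_shape))  # zeros except at the top-k slots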


Example 20: _finish_prob_for_one_fiber

  def _finish_prob_for_one_fiber(self, y, x, ildj, distribution_kwargs):
    """Finish computation of prob on one element of the inverse image."""
    x = self._maybe_rotate_dims(x, rotate_right=True)
    prob = self.distribution.prob(x, **distribution_kwargs)
    if self._is_maybe_event_override:
      prob = math_ops.reduce_prod(prob, self._reduce_event_indices)
    return math_ops.exp(math_ops.cast(ildj, prob.dtype)) * prob
Developer: Ajaycs99, Project: tensorflow, Lines: 7, Source: conditional_transformed_distribution.py



Note: The tensorflow.python.ops.math_ops.reduce_prod examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by various developers, and copyright remains with the original authors. Consult each project's license before distributing or using the code; do not repost without permission.

