
Python array_ops.concat Function Code Examples


This article collects typical usage examples of the Python function tensorflow.python.ops.array_ops.concat. If you are wondering what exactly concat does, how to call it, or what real-world uses look like, the curated code examples below may help.



The following presents 20 code examples of the concat function, ordered by default from most to least popular.
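Before diving in, here is a minimal sketch of the basic call using the public tf.concat alias (the examples below call the internal array_ops.concat, which has the same semantics; since TensorFlow 1.0 the signature is concat(values, axis), with the value list first):

  import tensorflow as tf

  t1 = tf.constant([[1, 2], [3, 4]])
  t2 = tf.constant([[5, 6], [7, 8]])

  # Concatenate along rows (axis 0): result shape (4, 2).
  rows = tf.concat([t1, t2], axis=0)
  # Concatenate along columns (axis 1): result shape (2, 4).
  cols = tf.concat([t1, t2], axis=1)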

Example 1: testConcatTuple

 def testConcatTuple(self):
   c1 = np.random.rand(4, 4)
   c2 = np.random.rand(4, 4)
   with self.test_session():
     concat_list_t = array_ops.concat(0, [c1, c2])
     concat_tuple_t = array_ops.concat(0, (c1, c2))
     self.assertAllEqual(concat_list_t.eval(), concat_tuple_t.eval())
Developer: kadeng, Project: tensorflow, Lines: 7, Source: concat_op_test.py
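This example uses the pre-1.0 signature, where the concat axis came first. A minimal sketch of the equivalent call under the TensorFlow 1.0+ signature, which takes the value list first:

  import numpy as np
  from tensorflow.python.ops import array_ops

  c1 = np.random.rand(4, 4)
  c2 = np.random.rand(4, 4)
  # Pre-1.0: array_ops.concat(0, [c1, c2]); 1.0+ swaps the arguments:
  concat_t = array_ops.concat([c1, c2], 0)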


Example 2: testConcatNoScalars

 def testConcatNoScalars(self):
   with self.cached_session():
     scalar = constant_op.constant(7)
     dim = array_ops.placeholder(dtypes.int32)
     with self.assertRaisesRegexp(
         ValueError, r"Can't concatenate scalars \(use tf\.stack instead\)"):
       array_ops.concat([scalar, scalar, scalar], dim)
Developer: JonathanRaiman, Project: tensorflow, Lines: 7, Source: concat_op_test.py
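As the error message suggests, a scalar has no axis to concatenate along; tf.stack instead creates a new axis. A minimal sketch:

  from tensorflow.python.framework import constant_op
  from tensorflow.python.ops import array_ops

  scalar = constant_op.constant(7)
  # stack() packs three rank-0 tensors into one rank-1 tensor of shape (3,).
  stacked = array_ops.stack([scalar, scalar, scalar])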


Example 3: circular_pad

    def circular_pad(input_, width, kernel_size):
      """Pad input_ for computing (circular) convolution.

      Args:
        input_: the input tensor
        width: the width of the tensor.
        kernel_size: the kernel size of the filter.
      Returns:
        a tensor whose width is (width + kernel_size - 1).
      """
      beginning = kernel_size // 2
      end = kernel_size - 1 - beginning

      tmp_up = array_ops.slice(input_, [0, width - beginning, 0, 0],
                               [-1, beginning, width, -1])
      tmp_down = array_ops.slice(input_, [0, 0, 0, 0], [-1, end, width, -1])
      tmp = array_ops.concat([tmp_up, input_, tmp_down], 1)

      new_width = width + kernel_size - 1
      tmp_left = array_ops.slice(tmp, [0, 0, width - beginning, 0],
                                 [-1, new_width, beginning, -1])
      tmp_right = array_ops.slice(tmp, [0, 0, 0, 0], [-1, new_width, end, -1])

      final = array_ops.concat([tmp_left, tmp, tmp_right], 2)
      return final
Developer: HughKu, Project: tensorflow, Lines: 25, Source: init_ops_test.py
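A hypothetical usage sketch: for an input of shape (batch, width, width, channels), circular_pad wraps rows and columns so that a subsequent convolution with the given kernel size behaves circularly, returning shape (batch, width + kernel_size - 1, width + kernel_size - 1, channels):

  from tensorflow.python.ops import random_ops

  width, kernel_size = 8, 3
  x = random_ops.random_normal([2, width, width, 4])
  # For a 3x3 kernel the result has shape (2, 10, 10, 4).
  padded = circular_pad(x, width, kernel_size)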


Example 4: _testGradientsSimple

 def _testGradientsSimple(self, dtype):
   # Test both positive and negative concat axis.
   # -2 and 1 correspond to the same axis for 3-dimensional tensors.
   for axis in [-2, 1]:
     with self.cached_session(use_gpu=True):
       inp = []
       inp_tensors = []
       for x in [1, 2, 6]:
         shape = [10, x, 2]
         t = np.random.rand(*shape).astype(dtype.as_numpy_dtype)
         if dtype.is_complex:
           t += -1j * t
         inp.append(t)
         inp_tensors.append(
             constant_op.constant(
                 t.flatten(),
                 shape=shape,
                 dtype=dtype))
       c = array_ops.concat(inp_tensors, axis)
       output_shape = [10, 9, 2]
       grad_inp = np.random.rand(*output_shape).astype(dtype.as_numpy_dtype)
       if dtype.is_complex:
         grad_inp += -1j * grad_inp
       grad_tensor = constant_op.constant(
           grad_inp.flatten(), shape=output_shape)
       grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
       concated_grad = array_ops.concat(grad, axis)
       result = self.evaluate(concated_grad)
   self.assertAllEqual(result, grad_inp)
Developer: JonathanRaiman, Project: tensorflow, Lines: 29, Source: concat_op_test.py
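The test exploits the fact that the gradient of concat simply routes slices of the upstream gradient back to the matching inputs, so re-concatenating the per-input gradients recovers the upstream gradient exactly. A minimal sketch of that identity (hypothetical shapes):

  import numpy as np
  from tensorflow.python.framework import constant_op
  from tensorflow.python.ops import array_ops
  from tensorflow.python.ops import gradients_impl

  a = constant_op.constant(np.random.rand(10, 2, 2).astype("f"))
  b = constant_op.constant(np.random.rand(10, 7, 2).astype("f"))
  c = array_ops.concat([a, b], 1)
  g = constant_op.constant(np.random.rand(10, 9, 2).astype("f"))
  # da == g[:, :2, :] and db == g[:, 2:, :].
  da, db = gradients_impl.gradients([c], [a, b], [g])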


Example 5: _RunAndVerifyGradientsRandom

  def _RunAndVerifyGradientsRandom(self):
    # Random dims of rank 5
    input_shape = np.random.randint(1, 5, size=5)
    # Random number of tensors
    num_tensors = np.random.randint(12, 20)
    # Random dim to concat on
    concat_dim = np.random.randint(5)
    concat_dim_sizes = np.random.randint(1, 5, size=num_tensors)
    with self.cached_session(use_gpu=True):
      inp = []
      inp_tensors = []
      for x in concat_dim_sizes:
        shape = input_shape
        shape[concat_dim] = x
        t = np.random.rand(*shape).astype("f")
        inp.append(t)
        inp_tensors.append(
            constant_op.constant(t.flatten(), shape=shape,
                                 dtype=dtypes.float32))
      c = array_ops.concat(inp_tensors, concat_dim)
      output_shape = input_shape
      output_shape[concat_dim] = concat_dim_sizes.sum()
      grad_inp = np.random.rand(*output_shape).astype("f")
      grad_tensor = constant_op.constant(grad_inp.flatten(), shape=output_shape)
      grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
      concated_grad = array_ops.concat(grad, concat_dim)
      result = self.evaluate(concated_grad)

    self.assertAllEqual(result, grad_inp)
Developer: JonathanRaiman, Project: tensorflow, Lines: 29, Source: concat_op_test.py


Example 6: _BiasAddGradGrad

def _BiasAddGradGrad(op, received_grad):
  """Gradient for the BiasAddGrad op.

  Args:
    op: BiasAddGrad op for which we are calculating gradients.
    received_grad: The gradients passed to the BiasAddGrad op.

  Returns:
    A single gradient Tensor for the input to BiasAddGrad (which
    is the gradient of the bias term in BiasAdd)
  """

  try:
    data_format = op.get_attr("data_format")
  except ValueError:
    data_format = None

  shape = array_ops.shape(op.inputs[0])
  rank = array_ops.rank(op.inputs[0])
  bias_shape = array_ops.shape(received_grad)

  if data_format == b"NCHW":
    expanded_shape = array_ops.concat([
        array_ops.ones_like(shape[:-3]), bias_shape,
        array_ops.ones_like(shape[-2:])
    ], 0)
    tile_mults = array_ops.concat([shape[:-3], [1], shape[-2:]], 0)
  else:
    expanded_shape = array_ops.concat(
        [array_ops.ones_like(shape[:-1]), bias_shape], 0)
    tile_mults = array_ops.concat([shape[:-1], [1]], 0)

  expanded_grad = array_ops.reshape(received_grad, expanded_shape)
  return array_ops.tile(expanded_grad, tile_mults)
Developer: Jackhuang945, Project: tensorflow, Lines: 34, Source: nn_grad.py
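For the default NHWC layout, the reshape-then-tile pair above just broadcasts the received per-channel gradient across every other dimension of the original input. A minimal numeric sketch (hypothetical shapes):

  from tensorflow.python.ops import array_ops

  # Suppose the BiasAdd input was (batch=2, h=3, w=3, channels=4), so the
  # received gradient has shape (4,): expand to (1, 1, 1, 4), then tile to
  # (2, 3, 3, 4).
  received_grad = array_ops.ones([4])
  expanded = array_ops.reshape(received_grad, [1, 1, 1, 4])
  tiled = array_ops.tile(expanded, [2, 3, 3, 1])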


Example 7: _concat_along_batch_dim

def _concat_along_batch_dim(tensor_list):
  """Concatenate tensors along batch (first) dimension.

  Args:
    tensor_list: list of Tensors or list of tuples of Tensors.

  Returns:
    Tensor or tuple of Tensors.

  Raises:
    ValueError: If 'tensor_list' is empty.

  """
  if not tensor_list:
    raise ValueError(
        "Cannot concatenate Tensors if there are no Tensors to concatenate.")

  if isinstance(tensor_list[0], (tuple, list)):
    # [(tensor1a, tensor1b),
    #  (tensor2a, tensor2b), ...] --> (tensor_a, tensor_b)
    return tuple(
        array_ops.concat(tensors, axis=0) for tensors in zip(*tensor_list))
  else:
    # [tensor1, tensor2] --> tensor
    return array_ops.concat(tensor_list, axis=0)
Developer: dyoung418, Project: tensorflow, Lines: 25, Source: fisher_blocks.py
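A usage sketch for the tuple branch (hypothetical shapes): pairs are zipped, and each position is concatenated along the batch dimension independently:

  from tensorflow.python.ops import array_ops

  a1 = array_ops.zeros([2, 3]); b1 = array_ops.zeros([2, 5])
  a2 = array_ops.ones([4, 3]);  b2 = array_ops.ones([4, 5])
  # Returns (a, b) with a.shape == (6, 3) and b.shape == (6, 5).
  a, b = _concat_along_batch_dim([(a1, b1), (a2, b2)])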


Example 8: sample

  def sample(self, n, seed=None, name="sample"):
    """Generate `n` samples.

    Args:
      n: scalar.  Number of samples to draw from each distribution.
      seed: Python integer seed for RNG.
      name: name to give to the op.

    Returns:
      samples: a `Tensor` of shape `(n,) + self.batch_shape` with values of type
          `self.dtype`.
    """
    with ops.name_scope(self.name):
      with ops.op_scope([self.p, n], name):
        n = ops.convert_to_tensor(n, name="n")
        p_2d = array_ops.reshape(self.p, array_ops.pack([-1, 1]))
        q_2d = 1. - p_2d
        probs = array_ops.concat(1, [q_2d, p_2d])
        samples = random_ops.multinomial(math_ops.log(probs), n, seed=seed)
        ret = array_ops.reshape(
            array_ops.transpose(samples),
            array_ops.concat(0,
                             [array_ops.expand_dims(n, 0), self.batch_shape()]))
        ret.set_shape(tensor_shape.vector(tensor_util.constant_value(n))
                      .concatenate(self.get_batch_shape()))
        return math_ops.cast(ret, self.dtype)
Developer: 285219011, Project: hello-world, Lines: 26, Source: bernoulli.py
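This example also predates TensorFlow 1.0 (op_scope, pack, and axis-first concat). The core trick is assembling a (batch, 2) matrix whose columns are P(0) = 1 - p and P(1) = p, then drawing class indices from it; a minimal sketch with the axis-last signature:

  from tensorflow.python.framework import constant_op
  from tensorflow.python.ops import array_ops
  from tensorflow.python.ops import math_ops
  from tensorflow.python.ops import random_ops

  p_2d = constant_op.constant([[0.3], [0.8]])     # shape (batch=2, 1)
  probs = array_ops.concat([1. - p_2d, p_2d], 1)  # columns: P(0), P(1)
  # Five draws per row; samples are 0s and 1s with shape (2, 5).
  samples = random_ops.multinomial(math_ops.log(probs), 5)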


Example 9: _forward

  def _forward(self, x):
    y = x
    # Pad the event_ndims with a zeros vector. We need this because it lets
    # us infer the scale in the inverse function.
    if self._static_event_ndims == 0:
      y = array_ops.expand_dims(y, dim=-1)
      zeros = array_ops.zeros_like(y)
    else:
      shape = array_ops.concat(0, (array_ops.shape(x)[:-1], [1]))
      zeros = array_ops.zeros(shape, dtype=y.dtype)
    y = array_ops.concat(array_ops.rank(y)-1, (y, zeros))

    # Set shape hints.
    if x.get_shape().ndims is not None:
      shape = x.get_shape().as_list()
      if self._static_event_ndims == 0:
        shape += [2]
      elif shape[-1] is not None:
        shape[-1] += 1
      shape = tensor_shape.TensorShape(shape)
      y.get_shape().assert_is_compatible_with(shape)
      y.set_shape(shape)

    # Since we only support event_ndims in [0, 1] and we do padding, we always
    # reduce over the last dimension, i.e., dim=-1 (which is the default).
    return nn_ops.softmax(y)
Developer: Qstar, Project: tensorflow, Lines: 26, Source: bijector.py


Example 10: _get_sparse_tensors

 def _get_sparse_tensors(self, inputs, weight_collections=None,
                         trainable=None):
   sparse_tensors = self.categorical_column._get_sparse_tensors(inputs)
   id_tensor = sparse_tensors.id_tensor
   weight_tensor = sparse_tensors.weight_tensor
   # Expands final dimension, so that embeddings are not combined during
   # embedding lookup.
   check_id_rank = check_ops.assert_equal(
       array_ops.rank(id_tensor), 2,
       data=[
           'Column {} expected ID tensor of rank 2. '.format(self.name),
           'id_tensor shape: ', array_ops.shape(id_tensor)])
   with ops.control_dependencies([check_id_rank]):
     id_tensor = sparse_ops.sparse_reshape(
         id_tensor,
         shape=array_ops.concat([id_tensor.dense_shape, [1]], axis=0))
   if weight_tensor is not None:
     check_weight_rank = check_ops.assert_equal(
         array_ops.rank(weight_tensor), 2,
         data=[
             'Column {} expected weight tensor of rank 2.'.format(self.name),
             'weight_tensor shape:', array_ops.shape(weight_tensor)])
     with ops.control_dependencies([check_weight_rank]):
       weight_tensor = sparse_ops.sparse_reshape(
           weight_tensor,
           shape=array_ops.concat([weight_tensor.dense_shape, [1]], axis=0))
   return fc._CategoricalColumn.IdWeightPair(id_tensor, weight_tensor)
Developer: DILASSS, Project: tensorflow, Lines: 27, Source: sequential_feature_column.py


Example 11: _to_dense

  def _to_dense(self):
    num_cols = 0
    rows = []
    broadcasted_blocks = [operator.to_dense() for operator in self.operators]
    broadcasted_blocks = linear_operator_util.broadcast_matrix_batch_dims(
        broadcasted_blocks)
    for block in broadcasted_blocks:
      batch_row_shape = array_ops.shape(block)[:-1]

      zeros_to_pad_before_shape = array_ops.concat(
          [batch_row_shape, [num_cols]], axis=-1)
      zeros_to_pad_before = array_ops.zeros(
          shape=zeros_to_pad_before_shape, dtype=block.dtype)
      num_cols += array_ops.shape(block)[-1]
      zeros_to_pad_after_shape = array_ops.concat(
          [batch_row_shape,
           [self.domain_dimension_tensor() - num_cols]], axis=-1)
      zeros_to_pad_after = array_ops.zeros(
          shape=zeros_to_pad_after_shape, dtype=block.dtype)

      rows.append(array_ops.concat(
          [zeros_to_pad_before, block, zeros_to_pad_after], axis=-1))

    mat = array_ops.concat(rows, axis=-2)
    mat.set_shape(self.shape)
    return mat
Developer: aritratony, Project: tensorflow, Lines: 26, Source: linear_operator_block_diag.py
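Each loop iteration pads a block with zero matrices on its left and right so that every row spans the full operator width; the rows are then stacked along the second-to-last axis. A minimal sketch of the same assembly for two 2x2 blocks (hypothetical values):

  from tensorflow.python.framework import constant_op
  from tensorflow.python.ops import array_ops

  a = constant_op.constant([[1., 2.], [3., 4.]])
  b = constant_op.constant([[5., 6.], [7., 8.]])
  z = array_ops.zeros([2, 2])
  row1 = array_ops.concat([a, z], axis=-1)              # [A | 0]
  row2 = array_ops.concat([z, b], axis=-1)              # [0 | B]
  block_diag = array_ops.concat([row1, row2], axis=-2)  # 4x4 result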


Example 12: testZerosCacheDoesntLeakAcrossModes

  def testZerosCacheDoesntLeakAcrossModes(self):
    with ops.Graph().as_default():
      t = random_ops.random_normal(shape=[100, 2])
      x = random_ops.random_normal(shape=[100, 4])
      dy = random_ops.random_normal(shape=[100, 4])
      with backprop.GradientTape() as gradient_tape:
        gradient_tape.watch(x)
        x1, _ = array_ops.split(x, num_or_size_splits=2, axis=1)
        y1 = x1 ** 2.
        y = array_ops.concat([y1, t], axis=1)

      dx = gradient_tape.gradient(y, x, output_gradients=dy)
      with self.test_session() as sess:
        sess.run(variables.global_variables_initializer())
        sess.run(dx)

    t = random_ops.random_normal(shape=[100, 2])
    x = random_ops.random_normal(shape=[100, 4])
    dy = random_ops.random_normal(shape=[100, 4])
    with backprop.GradientTape() as gradient_tape:
      gradient_tape.watch(x)
      x1, _ = array_ops.split(x, num_or_size_splits=2, axis=1)
      y1 = x1 ** 2.
      y = array_ops.concat([y1, t], axis=1)

    dx = gradient_tape.gradient(y, x, output_gradients=dy)
Developer: meteorcloudy, Project: tensorflow, Lines: 26, Source: backprop_test.py


Example 13: same_dynamic_shape

def same_dynamic_shape(a, b):
  """Returns whether a and b have the same dynamic shape.

  Args:
    a: `Tensor`
    b: `Tensor`

  Returns:
    `Boolean` `Tensor` representing if both tensors have the same shape.
  """
  a = ops.convert_to_tensor(a, name="a")
  b = ops.convert_to_tensor(b, name="b")

  # One of the shapes isn't fully defined, so we need to use the dynamic
  # shape.
  return control_flow_ops.cond(
      math_ops.equal(array_ops.rank(a), array_ops.rank(b)),
      # Here we can't just do math_ops.equal(a.shape, b.shape), since
      # static shape inference may break the equality comparison between
      # shape(a) and shape(b) in math_ops.equal.
      lambda: math_ops.reduce_all(math_ops.equal(
          array_ops.concat((
              array_ops.shape(a),
              array_ops.shape(b)), 0),
          array_ops.concat((
              array_ops.shape(b),
              array_ops.shape(a)), 0))),
      lambda: constant_op.constant(False))
Developer: AliMiraftab, Project: tensorflow, Lines: 28, Source: distribution_util.py
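A usage sketch (hypothetical tensors): because the comparison is built entirely from dynamic shape ops, it works even when static shapes are only partially known, and it returns a boolean Tensor rather than a Python bool:

  from tensorflow.python.framework import dtypes
  from tensorflow.python.ops import array_ops

  x = array_ops.placeholder(dtypes.float32, shape=[None, 3])
  y = array_ops.placeholder(dtypes.float32, shape=[None, 3])
  # Evaluates to True only when x and y are fed values of identical shape.
  same = same_dynamic_shape(x, y)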


Example 14: _SparseDenseCwiseMulOrDivGrad

def _SparseDenseCwiseMulOrDivGrad(op, grad, is_mul):
  """Common code for SparseDenseCwise{Mul,Div} gradients."""
  x_indices = op.inputs[0]
  x_shape = op.inputs[2]
  y = op.inputs[3]

  y_shape = math_ops.to_int64(array_ops.shape(y))
  num_added_dims = array_ops.expand_dims(
      array_ops.size(x_shape) - array_ops.size(y_shape), 0)
  augmented_y_shape = array_ops.concat(
      [array_ops.ones(num_added_dims, ops.dtypes.int64), y_shape], 0)

  scaling = x_shape // augmented_y_shape
  scaled_indices = x_indices // scaling
  scaled_indices = array_ops.slice(scaled_indices,
                                   array_ops.concat([[0], num_added_dims], 0),
                                   [-1, -1])
  dense_vals = array_ops.gather_nd(y, scaled_indices)

  if is_mul:
    dx = grad * dense_vals
    dy_val = grad * op.inputs[1]
  else:
    dx = grad / dense_vals
    dy_val = grad * (-op.inputs[1] / math_ops.square(dense_vals))
  # indices can repeat after scaling, so we can't use sparse_to_dense().
  dy = sparse_ops.sparse_add(
      array_ops.zeros_like(y),
      sparse_tensor.SparseTensor(scaled_indices, dy_val, y_shape))

  # (sp_indices, sp_vals, sp_shape, dense)
  return (None, dx, None, dy)
Developer: 1000sprites, Project: tensorflow, Lines: 32, Source: sparse_grad.py


Example 15: quantiles_ready

  def quantiles_ready():
    """The subgraph for when the quantiles are ready."""
    quantized_feature = quantile_ops.quantiles([sparse_column_values], [],
                                               [quantile_buckets], [])
    quantized_feature = math_ops.cast(quantized_feature[0], dtypes.int64)
    quantized_feature = array_ops.reshape(quantized_feature, [-1])
    example_indices, _ = array_ops.split(
        sparse_column_indices, num_or_size_splits=2, axis=1)
    example_indices = array_ops.squeeze(example_indices, [1])
    filtered_gradients = array_ops.gather(gradients, example_indices)
    filtered_hessians = array_ops.gather(hessians, example_indices)
    filtered_partition_ids = array_ops.gather(example_partition_ids,
                                              example_indices)
    unique_partitions, mapped_partitions = array_ops.unique(
        example_partition_ids)

    # Compute aggregate stats for each partition.
    per_partition_gradients = math_ops.unsorted_segment_sum(
        gradients, mapped_partitions, array_ops.size(unique_partitions))
    per_partition_hessians = math_ops.unsorted_segment_sum(
        hessians, mapped_partitions, array_ops.size(unique_partitions))

    # Prepend a bias feature per partition that accumulates the stats for all
    # examples in that partition.
    bias_feature_ids = array_ops.fill(
        array_ops.shape(unique_partitions), _BIAS_FEATURE_ID)
    bias_feature_ids = math_ops.cast(bias_feature_ids, dtypes.int64)
    partition_ids = array_ops.concat(
        [unique_partitions, filtered_partition_ids], 0)
    filtered_gradients = array_ops.concat(
        [per_partition_gradients, filtered_gradients], 0)
    filtered_hessians = array_ops.concat(
        [per_partition_hessians, filtered_hessians], 0)
    bucket_ids = array_ops.concat([bias_feature_ids, quantized_feature], 0)
    return partition_ids, bucket_ids, filtered_gradients, filtered_hessians
Developer: 1000sprites, Project: tensorflow, Lines: 35, Source: ordinal_split_handler.py


Example 16: testConcatTuple

 def testConcatTuple(self):
   c1 = np.random.rand(4, 4)
   c2 = np.random.rand(4, 4)
   concat_list_t = array_ops.concat([c1, c2], 0)
   concat_tuple_t = array_ops.concat((c1, c2), 0)
   self.assertAllEqual(
       self.evaluate(concat_list_t), self.evaluate(concat_tuple_t))
Developer: adit-chandra, Project: tensorflow, Lines: 7, Source: concat_op_test.py


Example 17: ReferenceDepthwiseConv2D

def ReferenceDepthwiseConv2D(input_tensor, filter_tensor, strides, padding,
                             data_format=None):
  # Reference implementation of depthwise convolution that uses regular
  # convolution.
  convs = []
  in_channels = filter_tensor.shape[2]
  # Use a custom implementation of depthwise conv2d using slicing.
  for channel in xrange(in_channels):
    # Slice the input along channel
    if data_format == "NCHW":
      input_slice = input_tensor[:, channel:channel+1, :, :]
    else:
      input_slice = input_tensor[:, :, :, channel:channel+1]

    # Slice the filters.  Filters are  H, W, InC, DepthMultiplier
    filter_slice = filter_tensor[:, :, channel:channel+1, :]
    # Do conv
    convs.append(nn_ops.conv2d(input_slice, filter_slice,
                               strides, padding,
                               data_format=data_format,
                               name="depthwise_slice_%d" % channel))

  # Concat along dimension.
  if data_format == "NCHW":
    return array_ops.concat(convs, 1)
  else:
    return array_ops.concat(convs, 3)
Developer: Brandon1016, Project: tensorflow, Lines: 27, Source: depthwise_conv_op_test.py
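A usage sketch (hypothetical shapes): for an NHWC input with 3 channels and a filter with a depth multiplier of 2, the reference produces 3 * 2 = 6 output channels, concatenated in channel order:

  from tensorflow.python.ops import random_ops

  x = random_ops.random_normal([1, 8, 8, 3])  # NHWC input
  f = random_ops.random_normal([3, 3, 3, 2])  # H, W, InC, DepthMultiplier
  # Output shape: (1, 8, 8, 6) with "SAME" padding.
  y = ReferenceDepthwiseConv2D(x, f, strides=[1, 1, 1, 1], padding="SAME")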


Example 18: power_sums_tensor

def power_sums_tensor(array_size, power_matrix, multiplier):
  r"""Computes \sum_{i=0}^{N-1} A^i B (A^i)^T for N=0..(array_size + 1).

  Args:
    array_size: The number of non-trivial sums to pre-compute.
    power_matrix: The "A" matrix above.
    multiplier: The "B" matrix above
  Returns:
    A Tensor with S[N] = \sum_{i=0}^{N-1} A^i B (A^i)^T
      S[0] is the zero matrix
      S[1] is B
      S[2] is A B A^T + B
      ...and so on
  """
  array_size = math_ops.cast(array_size, dtypes.int32)
  power_matrix = ops.convert_to_tensor(power_matrix)
  identity_like_power_matrix = linalg_ops.eye(
      array_ops.shape(power_matrix)[0], dtype=power_matrix.dtype)
  identity_like_power_matrix.set_shape(
      ops.convert_to_tensor(power_matrix).get_shape())
  transition_powers = functional_ops.scan(
      lambda previous_power, _: math_ops.matmul(previous_power, power_matrix),
      math_ops.range(array_size - 1),
      initializer=identity_like_power_matrix)
  summed = math_ops.cumsum(
      array_ops.concat([
          array_ops.expand_dims(multiplier, 0), math_ops.matmul(
              batch_times_matrix(transition_powers, multiplier),
              transition_powers,
              adjoint_b=True)
      ], 0))
  return array_ops.concat(
      [array_ops.expand_dims(array_ops.zeros_like(multiplier), 0), summed], 0)
Developer: AutumnQYN, Project: tensorflow, Lines: 33, Source: math_utils.py
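A hypothetical NumPy reference for checking small cases: the docstring's sum satisfies the recurrence S[0] = 0 and S[n+1] = A S[n] A^T + B, which the sketch below unrolls directly:

  import numpy as np

  def power_sums_reference(array_size, a, b):
    # S[0] = 0; S[n + 1] = A @ S[n] @ A.T + B, matching the docstring sum.
    sums = [np.zeros_like(b)]
    for _ in range(array_size):
      sums.append(a @ sums[-1] @ a.T + b)
    return np.stack(sums)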


Example 19: _format_for_tpu_embedding_sparse_batch

  def _format_for_tpu_embedding_sparse_batch(self, sparse_features):
    """Format sparse features for `enqueue_tpu_embedding_sparse_batch()`.

    Args:
      sparse_features: a `Dict` of `SparseTensor`s for embedding.

    Returns:
      Arguments for `enqueue_tpu_embedding_sparse_batch()`.
    """

    sample_idcs, embedding_idcs, aggregation_weights = list(), list(), list()
    for table in self._table_to_features_dict:
      sample_t, indices_t, weights_t = list(), list(), list()

      features = self._table_to_features_dict[table]
      for i, feature in enumerate(features):
        tensor = sparse_features[feature]
        sample_indices = tensor.indices[:, 0]
        embedding_indices = tensor.values
        weights = array_ops.ones_like(embedding_indices)
        sample_t.append(i * self._batch_size_per_core + sample_indices)
        indices_t.append(embedding_indices)
        weights_t.append(weights)

      sample_idcs.append(
          math_ops.cast(array_ops.concat(sample_t, axis=0), dtype=dtypes.int32))
      embedding_idcs.append(
          math_ops.cast(
              array_ops.concat(indices_t, axis=0), dtype=dtypes.int32))
      aggregation_weights.append(
          math_ops.cast(
              array_ops.concat(weights_t, axis=0), dtype=dtypes.float32))

    return sample_idcs, embedding_idcs, aggregation_weights
Developer: terrytangyuan, Project: tensorflow, Lines: 34, Source: tpu_embedding.py


Example 20: _entropy

 def _entropy(self):
   if (not self.distribution.is_continuous or
       not self.bijector.is_constant_jacobian):
     raise NotImplementedError("entropy is not implemented")
   # Suppose Y = g(X) where g is a diffeomorphism and X is a continuous rv. It
   # can be shown that:
   #   H[Y] = H[X] + E_X[(log o abs o det o J o g)(X)].
   # If is_constant_jacobian then:
   #   E_X[(log o abs o det o J o g)(X)] = (log o abs o det o J o g)(c)
   # where c can by anything.
   entropy = self.distribution.entropy()
   if self._is_maybe_event_override:
     # H[X] = sum_i H[X_i] if X_i are mutually independent.
     # This means that a reduce_sum is a simple rescaling.
     entropy *= math_ops.cast(math_ops.reduce_prod(self._override_event_shape),
                              dtype=entropy.dtype.base_dtype)
   if self._is_maybe_batch_override:
     new_shape = array_ops.concat([
         _ones_like(self._override_batch_shape),
         self.distribution.batch_shape_tensor()
     ], 0)
     entropy = array_ops.reshape(entropy, new_shape)
     multiples = array_ops.concat([
         self._override_batch_shape,
         _ones_like(self.distribution.batch_shape_tensor())
     ], 0)
     entropy = array_ops.tile(entropy, multiples)
   dummy = array_ops.zeros([], self.dtype)
   entropy -= self.bijector.inverse_log_det_jacobian(dummy)
   entropy.set_shape(self.batch_shape)
   return entropy
Developer: arnonhongklay, Project: tensorflow, Lines: 31, Source: transformed_distribution.py



Note: The tensorflow.python.ops.array_ops.concat examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are drawn from open-source projects contributed by many developers, and copyright remains with the original authors; consult each project's license before distributing or reusing the code. Do not republish without permission.

