
Python array_ops.broadcast_to Function Code Examples


This article collects typical usage examples of the Python function tensorflow.python.ops.array_ops.broadcast_to. If you are unsure what broadcast_to does or how to call it in practice, the curated examples below should help.



Twenty code examples of the broadcast_to function are shown below, sorted by popularity by default. Upvoting the examples you find useful helps the system recommend better Python code samples.
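Before diving into the examples, here is a minimal usage sketch (assuming TensorFlow 2.x in eager mode; tf.broadcast_to is the public wrapper over array_ops.broadcast_to):

import tensorflow as tf

# Broadcast a length-3 vector to a [2, 3] matrix. The semantics match
# np.broadcast_to: dimensions align from the right and must be equal or 1.
x = tf.constant([1, 2, 3])
y = tf.broadcast_to(x, [2, 3])
print(y.numpy())
# [[1 2 3]
#  [1 2 3]]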

Example 1: benchmarkBatchMatMulBroadcast

  def benchmarkBatchMatMulBroadcast(self):
    for (a_shape, b_shape) in self.shape_pairs:
      with compat.forward_compatibility_horizon(2019, 4, 26):
        with ops.Graph().as_default(), \
            session.Session(config=benchmark.benchmark_config()) as sess, \
            ops.device("/cpu:0"):
          matrix_a = variables.Variable(
              GetRandomNormalInput(a_shape, np.float32))
          matrix_b = variables.Variable(
              GetRandomNormalInput(b_shape, np.float32))
          variables.global_variables_initializer().run()

          # Use batch matmul op's internal broadcasting.
          self.run_op_benchmark(
              sess,
              math_ops.matmul(matrix_a, matrix_b),
              min_iters=50,
              name="batch_matmul_cpu_{}_{}".format(a_shape, b_shape))

          # Manually broadcast the input matrices using the broadcast_to op.
          broadcasted_batch_shape = array_ops.broadcast_static_shape(
              matrix_a.shape[:-2], matrix_b.shape[:-2])
          broadcasted_a_shape = broadcasted_batch_shape.concatenate(
              matrix_a.shape[-2:])
          broadcasted_b_shape = broadcasted_batch_shape.concatenate(
              matrix_b.shape[-2:])
          self.run_op_benchmark(
              sess,
              math_ops.matmul(
                  array_ops.broadcast_to(matrix_a, broadcasted_a_shape),
                  array_ops.broadcast_to(matrix_b, broadcasted_b_shape)),
              min_iters=50,
              name="batch_matmul_manual_broadcast_cpu_{}_{}".format(
                  a_shape, b_shape))
Contributor: aritratony | Project: tensorflow | Lines: 34 | Source: batch_matmul_op_test.py
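For reference, here is a minimal eager-mode sketch of the two code paths benchmarked above; the shapes are illustrative and not taken from the benchmark's shape_pairs:

import tensorflow as tf

a = tf.random.normal([8, 1, 4, 5])   # batch dims [8, 1]
b = tf.random.normal([1, 2, 5, 6])   # batch dims [1, 2]

# Path 1: rely on batch matmul's internal broadcasting ([8, 1] and [1, 2]
# broadcast to [8, 2]).
c_internal = tf.matmul(a, b)         # shape [8, 2, 4, 6]

# Path 2: broadcast the operands explicitly with broadcast_to, then matmul.
batch_shape = tf.broadcast_static_shape(a.shape[:-2], b.shape[:-2])
c_manual = tf.matmul(
    tf.broadcast_to(a, batch_shape.concatenate(a.shape[-2:])),
    tf.broadcast_to(b, batch_shape.concatenate(b.shape[-2:])))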


Example 2: testBroadcastScalarToNonScalar

 def testBroadcastScalarToNonScalar(self):
   with self.session(use_gpu=True):
     x = np.array(1.0, dtype=np.float64)  # np.float is removed in modern NumPy
     v_tf = array_ops.broadcast_to(constant_op.constant(1.0), [2, 3, 4,
                                                               1, 1, 1])
     v_np = np.broadcast_to(x, [2, 3, 4, 1, 1, 1])
     self.assertAllEqual(v_tf.eval(), v_np)
Contributor: kylin9872 | Project: tensorflow | Lines: 7 | Source: broadcast_to_ops_test.py


Example 3: testBroadcastToBasic

 def testBroadcastToBasic(self):
   for dtype in [np.uint8, np.uint16, np.int8, np.int16, np.int32, np.int64]:
     with self.test_session(use_gpu=True):
       x = np.array([1, 2, 3], dtype=dtype)
       v_tf = array_ops.broadcast_to(constant_op.constant(x), [3, 3])
       v_np = np.broadcast_to(x, [3, 3])
       self.assertAllEqual(v_tf.eval(), v_np)
Contributor: AnishShah | Project: tensorflow | Lines: 7 | Source: broadcast_to_ops_test.py


Example 4: testBroadcastToBadOutputShape

 def testBroadcastToBadOutputShape(self):
   with context.eager_mode():
     with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                  "Unable to broadcast tensor of shape"):
       self.evaluate(
           array_ops.broadcast_to(
               constant_op.constant([0, 1]), constant_op.constant([2, 1])))
Contributor: adit-chandra | Project: tensorflow | Lines: 7 | Source: broadcast_to_ops_test.py


Example 5: testBroadcastToShapeLargerDim2

 def testBroadcastToShapeLargerDim2(self):
   input_shape = [2, 1, 3, 2, 2, 2, 1, 1, 1]
   output_shape = [1, 1, 1, 2, 5, 3, 2, 2, 2, 3, 3, 3]
   with self.cached_session(use_gpu=True):
     x = np.array(np.random.randint(5, size=input_shape), dtype=np.int32)
     v_tf = array_ops.broadcast_to(constant_op.constant(x), output_shape)
     v_np = np.broadcast_to(x, output_shape)
     self.assertAllEqual(v_tf.eval(), v_np)
Contributor: kylin9872 | Project: tensorflow | Lines: 8 | Source: broadcast_to_ops_test.py


Example 6: _broadcast_to_uniform_shape

def _broadcast_to_uniform_shape(rt_input, shape, broadcast_inner_dimensions):
  """Broadcasts rt_input to the uniform shape `shape`."""
  if isinstance(rt_input, ragged_tensor.RaggedTensor):
    raise ValueError('Incompatible with shape: ragged rank mismatch')
  if broadcast_inner_dimensions:
    return array_ops.broadcast_to(rt_input, shape.inner_dim_sizes)
  else:
    return rt_input
Contributor: aritratony | Project: tensorflow | Lines: 8 | Source: ragged_tensor_shape.py


Example 7: testGradientForScalar

 def testGradientForScalar(self):
   x = constant_op.constant(1, dtype=dtypes.float32)
   v = array_ops.broadcast_to(x, [2, 4, 3])
   out = 2 * v
   with self.cached_session():
     err = gradient_checker.compute_gradient_error(x, x.get_shape(), out,
                                                   out.get_shape())
   self.assertLess(err, 1e-4)
Contributor: kylin9872 | Project: tensorflow | Lines: 8 | Source: broadcast_to_ops_test.py


Example 8: testGradientWithBroadcastAllDimensions

 def testGradientWithBroadcastAllDimensions(self):
   x = constant_op.constant([[1, 2, 3], [4, 5, 6]], dtype=dtypes.float32)
   v = array_ops.broadcast_to(x, [5, 4, 6])
   out = 2 * v
   with self.test_session():
     err = gradient_checker.compute_gradient_error(x, x.get_shape(),
                                                   out, out.get_shape())
   self.assertLess(err, 1e-4)
Contributor: AnishShah | Project: tensorflow | Lines: 8 | Source: broadcast_to_ops_test.py


Example 9: _verifyLu

  def _verifyLu(self, x, output_idx_type=dtypes.int64):
    # Verify that Px = LU.
    lu, perm = linalg_ops.lu(x, output_idx_type=output_idx_type)

    # Prepare the lower factor of shape num_rows x num_rows
    lu_shape = np.array(lu.shape.as_list())
    batch_shape = lu_shape[:-2]
    num_rows = lu_shape[-2]
    num_cols = lu_shape[-1]

    lower = array_ops.matrix_band_part(lu, -1, 0)

    if num_rows > num_cols:
      eye = linalg_ops.eye(
          num_rows, batch_shape=batch_shape, dtype=lower.dtype)
      lower = array_ops.concat([lower, eye[..., num_cols:]], axis=-1)
    elif num_rows < num_cols:
      lower = lower[..., :num_rows]

    # Fill the diagonal with ones.
    ones_diag = array_ops.ones(
        np.append(batch_shape, num_rows), dtype=lower.dtype)
    lower = array_ops.matrix_set_diag(lower, ones_diag)

    # Prepare the upper factor.
    upper = array_ops.matrix_band_part(lu, 0, -1)

    verification = math_ops.matmul(lower, upper)

    # Permute the rows of the product of the triangular LU factors.
    if num_rows > 0:
      # Reshape the product of the triangular factors and permutation indices
      # to a single batch dimension. This makes it easy to apply
      # invert_permutation and gather_nd ops.
      perm_reshaped = array_ops.reshape(perm, [-1, num_rows])
      verification_reshaped = array_ops.reshape(verification,
                                                [-1, num_rows, num_cols])
      # Invert the permutation in each batch.
      inv_perm_reshaped = map_fn.map_fn(array_ops.invert_permutation,
                                        perm_reshaped)
      batch_size = perm_reshaped.shape.as_list()[0]
      # Prepare the batch indices with the same shape as the permutation.
      # The corresponding batch index is paired with each of the `num_rows`
      # permutation indices.
      batch_indices = math_ops.cast(
          array_ops.broadcast_to(
              math_ops.range(batch_size)[:, None], perm_reshaped.shape),
          dtype=output_idx_type)
      permuted_verification_reshaped = array_ops.gather_nd(
          verification_reshaped,
          array_ops.stack([batch_indices, inv_perm_reshaped], axis=-1))

      # Reshape the verification matrix back to the original shape.
      verification = array_ops.reshape(permuted_verification_reshaped,
                                       lu_shape)

    self._verifyLuBase(x, lower, upper, perm, verification,
                       output_idx_type)
Contributor: adit-chandra | Project: tensorflow | Lines: 58 | Source: lu_op_test.py
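The broadcast_to call above implements a common indexing trick: pair each permutation entry with its batch index so gather_nd can act per batch. A standalone sketch with hypothetical values (TF 2.x public API):

import tensorflow as tf

perm = tf.constant([[1, 0], [0, 1]])                 # [batch=2, num_rows=2]
# Broadcast range(batch_size) across the rows: [[0, 0], [1, 1]].
batch_idx = tf.broadcast_to(tf.range(2)[:, None], perm.shape)
nd_idx = tf.stack([batch_idx, perm], axis=-1)        # shape [2, 2, 2]

mats = tf.constant([[[1., 2.], [3., 4.]],
                    [[5., 6.], [7., 8.]]])
permuted = tf.gather_nd(mats, nd_idx)
# Batch 0 has its rows swapped ([[3., 4.], [1., 2.]]); batch 1 is unchanged.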


Example 10: testGradientWithIncreasingRank

 def testGradientWithIncreasingRank(self):
   x = constant_op.constant([[1], [2]],
                            dtype=dtypes.float32)
   v = array_ops.broadcast_to(x, [5, 2, 3])
   out = 2 * v
   with self.test_session():
     err = gradient_checker.compute_gradient_error(x, x.get_shape(),
                                                   out, out.get_shape())
   self.assertLess(err, 1e-4)
Contributor: AnishShah | Project: tensorflow | Lines: 9 | Source: broadcast_to_ops_test.py


Example 11: testGradientWithSameRank

 def testGradientWithSameRank(self):
   x = constant_op.constant(np.reshape(np.arange(6), (2, 1, 3)),
                            dtype=dtypes.float32)
   v = array_ops.broadcast_to(x, [2, 5, 3])
   out = 2 * v
   with self.test_session():
     err = gradient_checker.compute_gradient_error(x, x.get_shape(),
                                                   out, out.get_shape())
   self.assertLess(err, 1e-4)
Contributor: AnishShah | Project: tensorflow | Lines: 9 | Source: broadcast_to_ops_test.py


Example 12: testBroadcastToShape

 def testBroadcastToShape(self):
   for input_dim in range(1, 6):
     for output_dim in range(input_dim, 6):
       with self.test_session(use_gpu=True):
         input_shape = [2] * input_dim
         output_shape = [2] * output_dim
         x = np.array(np.random.randint(5, size=input_shape), dtype=np.int32)
         v_tf = array_ops.broadcast_to(constant_op.constant(x), output_shape)
         v_np = np.broadcast_to(x, output_shape)
         self.assertAllEqual(v_tf.eval(), v_np)
Contributor: AnishShah | Project: tensorflow | Lines: 10 | Source: broadcast_to_ops_test.py


Example 13: testGradientForScalar

 def testGradientForScalar(self):
   # TODO(alextp): There is a bug with broadcast_to on GPU from scalars,
   # hence we make this test cpu-only.
   with ops.device("cpu:0"):
     x = constant_op.constant(1, dtype=dtypes.float32)
     v = array_ops.broadcast_to(x, [2, 4, 3])
     out = 2 * v
     with self.test_session():
       err = gradient_checker.compute_gradient_error(x, x.get_shape(),
                                                     out, out.get_shape())
   self.assertLess(err, 1e-4)
Contributor: AnishShah | Project: tensorflow | Lines: 11 | Source: broadcast_to_ops_test.py


Example 14: testGradientWithLargeDim

 def testGradientWithLargeDim(self):
   input_shape = [2, 1, 3, 2, 2, 2, 1, 1, 1]
   output_shape = [1, 1, 1, 2, 5, 3, 2, 2, 2, 3, 3, 3]
   x = constant_op.constant(np.array(np.random.randn(*input_shape),
                                     dtype=np.float32))
   v = array_ops.broadcast_to(x, output_shape)
   out = 2 * v
   with self.cached_session():
     err = gradient_checker.compute_gradient_error(x, x.get_shape(),
                                                   out, out.get_shape())
   self.assertLess(err, 1e-4)
Contributor: kylin9872 | Project: tensorflow | Lines: 11 | Source: broadcast_to_ops_test.py


Example 15: testBroadcastToShapeTypeAndInference

 def testBroadcastToShapeTypeAndInference(self):
   for dtype in [dtypes.int32, dtypes.int64]:
     with self.test_session(use_gpu=True):
       x = np.array([1, 2, 3])
       v_tf = array_ops.broadcast_to(
           constant_op.constant(x),
           constant_op.constant([3, 3], dtype=dtype))
       shape = v_tf.get_shape().as_list()
       v_np = np.broadcast_to(x, [3, 3])
       self.assertAllEqual(v_tf.eval(), v_np)
       # check shape inference when shape input is constant
       self.assertAllEqual(shape, v_np.shape)
Contributor: AnishShah | Project: tensorflow | Lines: 12 | Source: broadcast_to_ops_test.py
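The property this test checks, that the output's static shape is fully inferred when the shape argument is a constant, can be observed at graph-tracing time (a sketch, assuming TF 2.x):

import tensorflow as tf

@tf.function
def f():
    v = tf.broadcast_to(tf.constant([1, 2, 3]), tf.constant([3, 3]))
    # The shape input is a constant, so the static shape is already (3, 3)
    # during tracing rather than <unknown>.
    print('static shape at trace time:', v.shape)
    return v

f()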


Example 16: _BroadcastToGrad

def _BroadcastToGrad(op, grad):
  input_value = op.inputs[0]
  broadcast_shape = op.inputs[1]
  # Assign ids for each position in input_value.
  input_value_shape = array_ops.shape(input_value)
  input_value_size = array_ops.size(input_value)
  ids = array_ops.reshape(math_ops.range(input_value_size), input_value_shape)
  broadcast_ids = array_ops.broadcast_to(ids, broadcast_shape)
  # Group by ids and sum its gradients.
  grad_flatten = array_ops.reshape(grad, [-1])
  broadcast_ids_flatten = array_ops.reshape(broadcast_ids, [-1])
  updates_grad_flatten = math_ops.unsorted_segment_sum(grad_flatten,
                                                       broadcast_ids_flatten,
                                                       input_value_size)
  updates_grad = array_ops.reshape(updates_grad_flatten, input_value_shape)
  return [updates_grad, None]
Contributor: Wajih-O | Project: tensorflow | Lines: 16 | Source: array_grad.py
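This gradient sums the incoming gradient over every output position a given input element was broadcast to. A quick GradientTape check of that behavior (a sketch, assuming TF 2.x):

import tensorflow as tf

x = tf.Variable([[1.0], [2.0]])         # shape [2, 1]
with tf.GradientTape() as tape:
    y = tf.broadcast_to(x, [2, 3])      # each element copied 3 times
    loss = tf.reduce_sum(y)
print(tape.gradient(loss, x).numpy())   # [[3.] [3.]] -- one unit per copy, summed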


Example 17: batch_gather_with_default

def batch_gather_with_default(params,
                              indices,
                              default_value='',
                              name=None):
  """Same as `batch_gather` but inserts `default_value` for invalid indices.

  This operation is similar to `batch_gather` except that it will substitute
  the value for invalid indices with `default_value` as the contents.
  See `batch_gather` for more details.


  Args:
    params: A potentially ragged tensor with shape `[B1...BN, P1...PM]` (`N>=0`,
      `M>0`).
    indices: A potentially ragged tensor with shape `[B1...BN, I]` (`N>=0`).
    default_value: A value to be inserted in places where `indices` are out of
      bounds. Must be the same dtype as params and either a scalar or rank 1.
    name: A name for the operation (optional).

  Returns:
    A potentially ragged tensor with shape `[B1...BN, I, P2...PM]`.
    `result.ragged_rank = max(indices.ragged_rank, params.ragged_rank)`.

  #### Example:
    ```python
    >>> params = tf.ragged.constant([
          ['a', 'b', 'c'],
          ['d'],
          [],
          ['e']])
    >>> indices = tf.ragged.constant([[1, 2, -1], [], [], [0, 10]])
    >>> batch_gather_with_default(params, indices, 'FOO')
    [['b', 'c', 'FOO'], [], [], ['e', 'FOO']]
    ```
  """
  with ops.name_scope(name, 'RaggedBatchGatherWithDefault'):
    params = ragged_tensor.convert_to_tensor_or_ragged_tensor(
        params, name='params',
    )
    indices = ragged_tensor.convert_to_tensor_or_ragged_tensor(
        indices, name='indices',
    )
    default_value = ragged_tensor.convert_to_tensor_or_ragged_tensor(
        default_value, name='default_value',
    )
    # TODO(hterry): lift this restriction and support default_values
    #               of rank > 1.
    if default_value.shape.ndims not in (0, 1):
      raise ValueError('"default_value" must be a scalar or vector')
    upper_bounds = None
    if indices.shape.ndims is None:
      raise ValueError('Indices must have a known rank.')
    if params.shape.ndims is None:
      raise ValueError('Params must have a known rank.')

    num_batch_dimensions = indices.shape.ndims - 1
    pad = None
    # The logic for this works as follows:
    # - create a padded params, where:
    #    padded_params[b1...bn, 0] = default_value
    #    padded_params[b1...bn, i] = params[b1...bn, i-1] (i>0)
    # - create an `upper_bounds` Tensor that contains the number of elements
    #   in each innermost rank. Broadcast `upper_bounds` to be the same shape
    #   as `indices`.
    # - check to see which index in `indices` are out of bounds and substitute
    #   it with the index containing `default_value` (the first).
    # - call batch_gather with the indices adjusted.
    with ops.control_dependencies([
        check_ops.assert_greater_equal(array_ops.rank(params),
                                       array_ops.rank(indices))]):
      if ragged_tensor.is_ragged(params):
        row_lengths = ragged_array_ops.expand_dims(
            params.row_lengths(axis=num_batch_dimensions),
            axis=-1)
        upper_bounds = math_ops.cast(row_lengths, indices.dtype)

        pad_shape = _get_pad_shape(params, indices)

        pad = ragged_tensor_shape.broadcast_to(
            default_value, pad_shape)
      else:
        params_shape = array_ops.shape(params)
        pad_shape = array_ops.concat([
            params_shape[:num_batch_dimensions],
            [1],
            params_shape[num_batch_dimensions + 1:params.shape.ndims]
        ], 0)
        upper_bounds = params_shape[num_batch_dimensions]
        pad = array_ops.broadcast_to(default_value, pad_shape)

      # Add `default_value` as the first value in the innermost (ragged) rank.
      pad = math_ops.cast(pad, params.dtype)
      padded_params = array_ops.concat(
          [pad, params], axis=num_batch_dimensions)

      # Adjust the indices by substituting out-of-bound indices to the
      # default-value index (which is the first element)
      shifted_indices = indices + 1
      # An index equal to the upper bound is also invalid, hence >= not >.
      is_out_of_bounds = (indices < 0) | (indices >= upper_bounds)
#......... (remainder of this function omitted) .........
Contributor: ziky90 | Project: tensorflow | Lines: 101 | Source: ragged_batch_gather_with_default_op.py
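A dense-tensor sketch of the pad-and-shift trick described in the comments above (hypothetical values; the ragged branch follows the same idea):

import tensorflow as tf

params = tf.constant([['a', 'b', 'c'], ['d', 'e', 'f']])
indices = tf.constant([[1, 5], [-1, 0]])   # 5 and -1 are out of bounds

# Put the default value at position 0 of every row...
pad = tf.broadcast_to(tf.constant('FOO'), [2, 1])
padded = tf.concat([pad, params], axis=1)

# ...then shift valid indices up by one and send invalid ones to position 0.
upper = tf.shape(params)[1]
shifted = tf.where((indices < 0) | (indices >= upper), 0, indices + 1)
result = tf.gather(padded, shifted, batch_dims=1)
# [[b'b', b'FOO'], [b'FOO', b'd']]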


Example 18: testBroadcastToScalar

 def testBroadcastToScalar(self):
   with self.test_session(use_gpu=True):
     x = np.array(1, dtype=np.int32)
     v_tf = array_ops.broadcast_to(constant_op.constant(x), [3, 3])
     v_np = np.broadcast_to(x, [3, 3])
     self.assertAllEqual(v_tf.eval(), v_np)
Contributor: AnishShah | Project: tensorflow | Lines: 6 | Source: broadcast_to_ops_test.py


Example 19: testBroadcastToBool

 def testBroadcastToBool(self):
   with self.test_session(use_gpu=True):
     x = np.array([True, False, True], dtype=np.bool_)  # np.bool is removed in modern NumPy
     v_tf = array_ops.broadcast_to(constant_op.constant(x), [3, 3])
     v_np = np.broadcast_to(x, [3, 3])
     self.assertAllEqual(v_tf.eval(), v_np)
Contributor: AnishShah | Project: tensorflow | Lines: 6 | Source: broadcast_to_ops_test.py


Example 20: testBroadcastToString

 def testBroadcastToString(self):
   with self.test_session(use_gpu=True):
     x = np.array([b"1", b"2", b"3"])
     v_tf = array_ops.broadcast_to(constant_op.constant(x), [3, 3])
     v_np = np.broadcast_to(x, [3, 3])
     self.assertAllEqual(v_tf.eval(), v_np)
Contributor: AnishShah | Project: tensorflow | Lines: 6 | Source: broadcast_to_ops_test.py



Note: The tensorflow.python.ops.array_ops.broadcast_to examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors. Consult each project's License before distributing or using the code, and do not repost without permission.

