Python array_ops.gather_nd Function Code Examples


This article collects typical usage examples of the tensorflow.python.ops.array_ops.gather_nd function in Python. If you are wondering how gather_nd is used in practice, how to call it, or what real-world examples look like, the curated code samples below may help.



The following presents 20 code examples of the gather_nd function, sorted by popularity by default.
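Before the examples, here is a minimal sketch of the basic semantics. This sketch is not taken from any of the projects below; it assumes TensorFlow 2.x eager execution and the public tf.gather_nd API rather than the internal array_ops module used in the examples.

import tensorflow as tf

params = tf.constant([[1, 2, 3],
                      [4, 5, 6],
                      [7, 8, 9]])

# Full-depth index rows pick individual elements: [row, col].
elements = tf.gather_nd(params, [[0, 2], [2, 0]])
print(elements.numpy())  # [3 7]

# Shorter index rows pick slices along the leading dimensions.
rows = tf.gather_nd(params, [[1], [0]])
print(rows.numpy())      # [[4 5 6] [1 2 3]]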

Example 1: testEmptyIndicesAndParamsOKButJustEmptyParamsFails

  def testEmptyIndicesAndParamsOKButJustEmptyParamsFails(self):
    with self.session(use_gpu=True):
      params = np.ones((3, 3), dtype=np.float32)

      indices_empty = np.empty((0, 2), dtype=np.int32)
      gather_nd_ok_t = array_ops.gather_nd(params, indices_empty)
      gather_nd_ok_val = gather_nd_ok_t.eval()
      self.assertEqual([0], gather_nd_ok_t.get_shape())
      self.assertAllClose(np.empty((0,), dtype=np.float32), gather_nd_ok_val)

      indices_empty = np.empty((0, 1), dtype=np.int32)
      gather_nd_ok_t = array_ops.gather_nd(params, indices_empty)
      gather_nd_ok_val = gather_nd_ok_t.eval()
      self.assertEqual([0, 3], gather_nd_ok_t.get_shape())
      self.assertAllClose(np.empty((0, 3), dtype=np.float32), gather_nd_ok_val)

      params_empty = np.empty((0, 3), dtype=np.float32)
      indices_empty = np.empty((0, 2), dtype=np.int32)
      gather_nd_ok_t = array_ops.gather_nd(params_empty, indices_empty)
      gather_nd_ok_val = gather_nd_ok_t.eval()
      self.assertEqual([0], gather_nd_ok_t.get_shape())
      self.assertAllClose(np.empty((0,), dtype=np.float32), gather_nd_ok_val)

      params_empty = np.empty((0, 3), dtype=np.float32)
      indices_nonempty = np.zeros((1, 2), dtype=np.int32)
      gather_nd_break_t = array_ops.gather_nd(params_empty, indices_nonempty)
      with self.assertRaisesOpError(
          r"Requested more than 0 entries, but params is empty."):
        gather_nd_break_t.eval()
      self.assertAllClose(np.empty((0,), dtype=np.float32), gather_nd_ok_val)
Developer: abhinav-upadhyay, Project: tensorflow, Lines: 30, Source: gather_nd_op_test.py
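A hedged sketch of the same edge cases with the public TF 2.x API (the exact error type and message may vary between versions):

import numpy as np
import tensorflow as tf

params = np.ones((3, 3), dtype=np.float32)

# Empty indices are fine: zero gathered elements or zero gathered rows.
print(tf.gather_nd(params, np.empty((0, 2), np.int32)).shape)  # (0,)
print(tf.gather_nd(params, np.empty((0, 1), np.int32)).shape)  # (0, 3)

# Non-empty indices into empty params is the failing case.
try:
  tf.gather_nd(np.empty((0, 3), np.float32), np.zeros((1, 2), np.int32))
except tf.errors.OpError as e:
  print(type(e).__name__)  # e.g. InvalidArgumentError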


Example 2: _get_coordinatewise_learning_rate

  def _get_coordinatewise_learning_rate(self, grad, var):
    # Compute the learning rate using a moving average for the diagonal of BB^T
    avg_first = self.get_slot(var, 'first_moment')
    avg_second = self.get_slot(var, 'second_moment')
    decay_tensor = math_ops.cast(self._decay_tensor, var.dtype)
    batch_size = math_ops.cast(self._batch_size_tensor, var.dtype)

    # Create an estimator for the moving average of gradient mean and variance
    # via Welford's algorithm
    if isinstance(grad, ops.Tensor):
      delta = grad - avg_first
      first_moment_update = avg_first.assign_add(
          array_ops.where(self._counter < 1, math_ops.cast(1, var.dtype),
                          1. - decay_tensor) * delta)

      with ops.control_dependencies([first_moment_update]):
        second_moment_update = avg_second.assign_add(
            math_ops.cast(self._counter < 1, var.dtype) *
            -(1. - decay_tensor) * (
                avg_second - decay_tensor  * math_ops.square(delta)))
      diag_preconditioner = control_flow_ops.with_dependencies(
          [second_moment_update],
          clip_ops.clip_by_value(avg_second, 1e-12, 1e12))
    elif isinstance(grad, ops.IndexedSlices):
      delta = grad.values - array_ops.gather_nd(avg_first, grad.indices)
      first_moment_update = state_ops.scatter_add(
          avg_first,
          grad.indices,
          array_ops.where(self._counter < 1,
                          math_ops.cast(1., var.dtype),
                          1. - decay_tensor) * delta)

      with ops.control_dependencies([first_moment_update]):
        avg_second = state_ops.scatter_add(
            avg_second,
            grad.indices,
            math_ops.cast(self._counter < 1, var.dtype) *
            -(1. - decay_tensor) * (
                array_ops.gather_nd(avg_second, grad.indices) - decay_tensor *
                math_ops.square(delta)))
        avg_second = array_ops.gather_nd(avg_second, grad.indices)
        # TODO(b/70783772)
        diag_preconditioner = clip_ops.clip_by_value(avg_second, 1e-12, 1e12)
    else:
      raise errors.InvalidArgumentError(
          None, None, 'grad must be of type Tensor or IndexedSlices')

    diag_preconditioner *= batch_size

    if self._use_single_learning_rate:
      diag_preconditioner = math_ops.reduce_mean(diag_preconditioner)

    # From Theorem 2 Corollary 1 of Mandt et al. 2017
    return 2. * batch_size / (
        math_ops.cast(self._total_num_examples, var.dtype.base_dtype) *
        diag_preconditioner)
Developer: AnddyWang, Project: tensorflow, Lines: 56, Source: variational_sgd_optimizer.py


Example 3: testGradientsRank7Elements

  def testGradientsRank7Elements(self):
    # Shape [1,1,2,1,1,2,2]
    indices = constant_op.constant(
        [[[
            [[[[0, 0, 0, 0, 0, 1], [0, 0, 1, 0, 0, 0]]]],
            [[[[0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 1]]]]
        ]]],
        dtype=dtypes.int32)
    inputs = constant_op.constant(
        [[[
            [[[[1, 3], [5, 7]]]],
            [[[[2, 4], [6, 8]]]]
        ]]], dtype=dtypes.float64)
    outputs = array_ops.gather_nd(inputs, indices)

    grad_vals = constant_op.constant(
        [[[
            [[[[1, 2], [3, 4]]]],
            [[[[5, 6], [7, 8]]]]
        ]]], dtype=dtypes.float64)
    grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]
    expected_grads = np.array(
        [[[
            [[[[5, 6], [1, 2]]]],
            [[[[3, 4], [7, 8]]]]
        ]]], dtype=np.float64)
    with self.session(use_gpu=True):
      self.assertAllEqual(expected_grads, grads.eval())
Developer: abhinav-upadhyay, Project: tensorflow, Lines: 28, Source: gather_nd_op_test.py


Example 4: _TensorScatterUpdateGrad

def _TensorScatterUpdateGrad(op, grad):
  indices = op.inputs[1]
  updates_grad = array_ops.gather_nd(grad, indices)
  tensor_grad = array_ops.tensor_scatter_update(
      array_ops.identity(grad), indices,
      array_ops.zeros_like(op.inputs[2], dtype=grad.dtype))
  return [tensor_grad, None, updates_grad]
Developer: Wajih-O, Project: tensorflow, Lines: 7, Source: array_grad.py
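A hedged sketch of what this gradient rule produces, using the public tf.tensor_scatter_nd_update and tf.GradientTape under TF 2.x with toy data: the gradient flowing to updates is gathered at indices, while the corresponding cells of the gradient flowing to tensor are zeroed out.

import tensorflow as tf

tensor = tf.Variable([1., 2., 3., 4.])
updates = tf.Variable([10., 20.])
indices = tf.constant([[1], [3]])

with tf.GradientTape() as tape:
  out = tf.tensor_scatter_nd_update(tensor, indices, updates)
  loss = tf.reduce_sum(out * tf.constant([1., 2., 3., 4.]))

d_tensor, d_updates = tape.gradient(loss, [tensor, updates])
print(d_tensor.numpy())   # [1. 0. 3. 0.]  (updated slots zeroed out)
print(d_updates.numpy())  # [2. 4.]        (gradient gathered at the indices)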


Example 5: _single_seq_fn

 def _single_seq_fn():
   batch_size = array_ops.shape(inputs, out_type=tag_indices.dtype)[0]
   example_inds = array_ops.reshape(
       math_ops.range(batch_size, dtype=tag_indices.dtype), [-1, 1])
   return array_ops.gather_nd(
       array_ops.squeeze(inputs, [1]),
       array_ops.concat([example_inds, tag_indices], axis=1))
Developer: bikong2, Project: tensorflow, Lines: 7, Source: crf.py
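The pattern above, pairing a batch index with a per-example index and gathering one value per example, in a hedged standalone sketch (TF 2.x public API, toy data):

import tensorflow as tf

scores = tf.constant([[0.1, 0.9],
                      [0.7, 0.3],
                      [0.4, 0.6]])          # [batch_size, num_tags]
tag_indices = tf.constant([[1], [0], [1]])  # one tag per example

example_inds = tf.reshape(tf.range(3), [-1, 1])
picked = tf.gather_nd(scores, tf.concat([example_inds, tag_indices], axis=1))
print(picked.numpy())  # [0.9 0.7 0.6]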


Example 6: _SparseDenseCwiseMulOrDivGrad

def _SparseDenseCwiseMulOrDivGrad(op, grad, is_mul):
  """Common code for SparseDenseCwise{Mul,Div} gradients."""
  x_indices = op.inputs[0]
  x_shape = op.inputs[2]
  y = op.inputs[3]

  y_shape = math_ops.to_int64(array_ops.shape(y))
  num_added_dims = array_ops.expand_dims(
      array_ops.size(x_shape) - array_ops.size(y_shape), 0)
  augmented_y_shape = array_ops.concat(
      [array_ops.ones(num_added_dims, ops.dtypes.int64), y_shape], 0)

  scaling = x_shape // augmented_y_shape
  scaled_indices = x_indices // scaling
  scaled_indices = array_ops.slice(scaled_indices,
                                   array_ops.concat([[0], num_added_dims], 0),
                                   [-1, -1])
  dense_vals = array_ops.gather_nd(y, scaled_indices)

  if is_mul:
    dx = grad * dense_vals
    dy_val = grad * op.inputs[1]
  else:
    dx = grad / dense_vals
    dy_val = grad * (-op.inputs[1] / math_ops.square(dense_vals))
  # indices can repeat after scaling, so we can't use sparse_to_dense().
  dy = sparse_ops.sparse_add(
      array_ops.zeros_like(y),
      sparse_tensor.SparseTensor(scaled_indices, dy_val, y_shape))

  # (sp_indices, sp_vals, sp_shape, dense)
  return (None, dx, None, dy)
Developer: 1000sprites, Project: tensorflow, Lines: 32, Source: sparse_grad.py


Example 7: testUnknownIndices

 def testUnknownIndices(self):
   params = constant_op.constant([[0, 1, 2]])
   indices = array_ops.placeholder(dtypes.int32)
   gather_nd_t = array_ops.gather_nd(params, indices)
   shape = gather_nd_t.get_shape()
   self.assertEqual(None, shape.ndims)
   self.assertEqual(None, tensor_shape.dimension_value(shape[0]))
Developer: abhinav-upadhyay, Project: tensorflow, Lines: 7, Source: gather_nd_op_test.py


Example 8: dense_to_sparse_tensor

def dense_to_sparse_tensor(dense_tensor, ignore_value=None):
  """Converts dense `Tensor` to `SparseTensor`, dropping `ignore_value` cells.

  Args:
    dense_tensor: A `Tensor`.
    ignore_value: Entries in `dense_tensor` equal to this value will be
      absent from the return `SparseTensor`. If `None`, default value of
      `dense_tensor` dtype will be used (e.g. '' for `str`, 0 for `int`).

  Returns:
    A `SparseTensor` with the same shape as `dense_tensor`.

  Raises:
    ValueError: when `dense_tensor`'s rank is `None`.
  """
  with ops.name_scope("DenseToSparseTensor"):
    dense_tensor = ops.convert_to_tensor(dense_tensor)
    ignore_value = _ignore_value_tensor(dense_tensor.dtype, ignore_value)
    indices = array_ops.where(
        math_ops.not_equal(dense_tensor, ignore_value), name="indices")
    return sparse_tensor.SparseTensor(
        indices=indices,
        values=array_ops.gather_nd(dense_tensor, indices, name="values"),
        dense_shape=array_ops.shape(
            dense_tensor, out_type=dtypes.int64, name="dense_shape"))
Developer: 1000sprites, Project: tensorflow, Lines: 25, Source: sparse_ops.py
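A hedged usage sketch of the same where + gather_nd pattern with the public TF 2.x API; tf.sparse.from_dense offers this behaviour directly, while the version below mirrors the helper above step by step:

import tensorflow as tf

dense = tf.constant([[0., 1., 0.],
                     [2., 0., 3.]])
indices = tf.where(tf.not_equal(dense, 0.))
sparse = tf.sparse.SparseTensor(
    indices=indices,
    values=tf.gather_nd(dense, indices),
    dense_shape=tf.shape(dense, out_type=tf.int64))
print(sparse.values.numpy())               # [1. 2. 3.]
print(tf.sparse.to_dense(sparse).numpy())  # recovers the dense tensor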


Example 9: gather_tree_from_array

def gather_tree_from_array(t, parent_ids, sequence_length):
  """Calculates the full beams for `TensorArray`s.

  Args:
    t: A stacked `TensorArray` of size `max_time` that contains `Tensor`s of
      shape `[batch_size, beam_width, s]` or `[batch_size * beam_width, s]`
      where `s` is the depth shape.
    parent_ids: The parent ids of shape `[max_time, batch_size, beam_width]`.
    sequence_length: The sequence length of shape `[batch_size, beam_width]`.

  Returns:
    A `Tensor` which is a stacked `TensorArray` of the same size and type as
    `t` and where beams are sorted in each `Tensor` according to `parent_ids`.
  """
  max_time = parent_ids.shape[0].value or array_ops.shape(parent_ids)[0]
  batch_size = parent_ids.shape[1].value or array_ops.shape(parent_ids)[1]
  beam_width = parent_ids.shape[2].value or array_ops.shape(parent_ids)[2]

  # Generate beam ids that will be reordered by gather_tree.
  beam_ids = array_ops.expand_dims(
      array_ops.expand_dims(math_ops.range(beam_width), 0), 0)
  beam_ids = array_ops.tile(beam_ids, [max_time, batch_size, 1])

  mask = array_ops.sequence_mask(
      sequence_length, maxlen=max_time, dtype=dtypes.int32)
  mask = array_ops.transpose(mask, perm=[2, 0, 1])

  # Use beam_width + 1 to mark the end of beam.
  masked_beam_ids = (beam_ids * mask) + (1 - mask) * (beam_width + 1)

  max_sequence_lengths = math_ops.to_int32(
      math_ops.reduce_max(sequence_length, axis=1))
  sorted_beam_ids = beam_search_ops.gather_tree(
      step_ids=masked_beam_ids,
      parent_ids=parent_ids,
      max_sequence_lengths=max_sequence_lengths,
      end_token=beam_width + 1)

  # For out of range steps, simply copy the same beam.
  sorted_beam_ids = array_ops.where(
      math_ops.cast(mask, dtypes.bool), x=sorted_beam_ids, y=beam_ids)

  # Generate indices for gather_nd.
  time_ind = array_ops.tile(array_ops.reshape(
      math_ops.range(max_time), [-1, 1, 1]), [1, batch_size, beam_width])
  batch_ind = array_ops.tile(array_ops.reshape(
      math_ops.range(batch_size), [-1, 1, 1]), [1, max_time, beam_width])
  batch_ind = array_ops.transpose(batch_ind, perm=[1, 0, 2])
  indices = array_ops.stack([time_ind, batch_ind, sorted_beam_ids], -1)

  # Gather from a tensor with collapsed additional dimensions.
  gather_from = t
  final_shape = array_ops.shape(gather_from)
  gather_from = array_ops.reshape(
      gather_from, [max_time, batch_size, beam_width, -1])
  ordered = array_ops.gather_nd(gather_from, indices)
  ordered = array_ops.reshape(ordered, final_shape)

  return ordered
Developer: BhaskarNallani, Project: tensorflow, Lines: 59, Source: beam_search_decoder.py


Example 10: maybe_sample

 def maybe_sample():
   """Perform scheduled sampling."""
   where_sampling = math_ops.cast(
       array_ops.where(sample_ids > -1), dtypes.int32)
   where_not_sampling = math_ops.cast(
       array_ops.where(sample_ids <= -1), dtypes.int32)
   sample_ids_sampling = array_ops.gather_nd(sample_ids, where_sampling)
   inputs_not_sampling = array_ops.gather_nd(
       base_next_inputs, where_not_sampling)
   sampled_next_inputs = self._embedding_fn(sample_ids_sampling)
   base_shape = array_ops.shape(base_next_inputs)
   return (array_ops.scatter_nd(indices=where_sampling,
                                updates=sampled_next_inputs,
                                shape=base_shape)
           + array_ops.scatter_nd(indices=where_not_sampling,
                                  updates=inputs_not_sampling,
                                  shape=base_shape))
Developer: AnddyWang, Project: tensorflow, Lines: 17, Source: helper.py
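A hedged standalone sketch of the gather_nd/scatter_nd merge performed above (TF 2.x public API; the embedding lookup is replaced with a constant stand-in, and all names and values are illustrative):

import tensorflow as tf

sample_ids = tf.constant([3, -1, 7, -1])      # -1 means "keep the base input"
base_inputs = tf.constant([[0., 0.], [1., 1.], [2., 2.], [3., 3.]])
embedded = tf.constant([[9., 9.], [8., 8.]])  # stand-in for embedding lookups

where_sampling = tf.cast(tf.where(sample_ids > -1), tf.int32)
where_not_sampling = tf.cast(tf.where(sample_ids <= -1), tf.int32)

merged = (tf.scatter_nd(where_sampling, embedded, tf.shape(base_inputs)) +
          tf.scatter_nd(where_not_sampling,
                        tf.gather_nd(base_inputs, where_not_sampling),
                        tf.shape(base_inputs)))
print(merged.numpy())  # rows 0 and 2 from `embedded`, rows 1 and 3 kept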


Example 11: testBadIndicesCPU

 def testBadIndicesCPU(self):
   with self.session(use_gpu=False):
     params = [0, 1, 2]
     indices = [[[0], [7]]]  # Make this one higher rank
     gather_nd = array_ops.gather_nd(params, indices)
     with self.assertRaisesOpError(
         r"indices\[0,1\] = \[7\] does not index into param shape \[3\]"):
       self.evaluate(gather_nd)
Developer: JonathanRaiman, Project: tensorflow, Lines: 8, Source: gather_nd_op_test.py


Example 12: testGatherNdRefVariable

 def testGatherNdRefVariable(self):
   with self.cached_session():
     v = variables.RefVariable(constant_op.constant([[1, 2], [3, 4], [5, 6]]))
     self.evaluate(variables.global_variables_initializer())
     gather = array_ops.gather_nd(v, [[0, 1], [2, 0]])
     if not context.executing_eagerly():  # .op doesn't make sense in Eager
       self.assertEqual("GatherNd", gather.op.name)
     self.assertAllEqual([2, 5], gather)
Developer: adit-chandra, Project: tensorflow, Lines: 8, Source: gather_nd_op_test.py


Example 13: _runGather

 def _runGather(self, params, indices):
   with self.test_session():
     paramsp = array_ops.placeholder(params.dtype)
     indicesp = array_ops.placeholder(indices.dtype)
     with self.test_scope():
       gather_nd_t = array_ops.gather_nd(paramsp, indicesp)
     feed_dict = {paramsp: params, indicesp: indices}
     return gather_nd_t.eval(feed_dict=feed_dict)
Developer: AndrewTwinz, Project: tensorflow, Lines: 8, Source: gather_nd_op_test.py


Example 14: testBadIndicesWithSlicesCPU

 def testBadIndicesWithSlicesCPU(self):
   with self.session(use_gpu=False):
     params = [[0, 1, 2]]
     indices = [[[0], [0], [1]]]  # Make this one higher rank
     gather_nd = array_ops.gather_nd(params, indices)
     with self.assertRaisesOpError(
         r"indices\[0,2\] = \[1\] does not index into param shape \[1,3\]"):
       gather_nd.eval()
Developer: abhinav-upadhyay, Project: tensorflow, Lines: 8, Source: gather_nd_op_test.py


Example 15: _verifyLu

  def _verifyLu(self, x, output_idx_type=dtypes.int64):
    # Verify that Px = LU.
    lu, perm = linalg_ops.lu(x, output_idx_type=output_idx_type)

    # Prepare the lower factor of shape num_rows x num_rows
    lu_shape = np.array(lu.shape.as_list())
    batch_shape = lu_shape[:-2]
    num_rows = lu_shape[-2]
    num_cols = lu_shape[-1]

    lower = array_ops.matrix_band_part(lu, -1, 0)

    if num_rows > num_cols:
      eye = linalg_ops.eye(
          num_rows, batch_shape=batch_shape, dtype=lower.dtype)
      lower = array_ops.concat([lower, eye[..., num_cols:]], axis=-1)
    elif num_rows < num_cols:
      lower = lower[..., :num_rows]

    # Fill the diagonal with ones.
    ones_diag = array_ops.ones(
        np.append(batch_shape, num_rows), dtype=lower.dtype)
    lower = array_ops.matrix_set_diag(lower, ones_diag)

    # Prepare the upper factor.
    upper = array_ops.matrix_band_part(lu, 0, -1)

    verification = math_ops.matmul(lower, upper)

    # Permute the rows of product of the Cholesky factors.
    if num_rows > 0:
      # Reshape the product of the triangular factors and permutation indices
      # to a single batch dimension. This makes it easy to apply
      # invert_permutation and gather_nd ops.
      perm_reshaped = array_ops.reshape(perm, [-1, num_rows])
      verification_reshaped = array_ops.reshape(verification,
                                                [-1, num_rows, num_cols])
      # Invert the permutation in each batch.
      inv_perm_reshaped = map_fn.map_fn(array_ops.invert_permutation,
                                        perm_reshaped)
      batch_size = perm_reshaped.shape.as_list()[0]
      # Prepare the batch indices with the same shape as the permutation.
      # The corresponding batch index is paired with each of the `num_rows`
      # permutation indices.
      batch_indices = math_ops.cast(
          array_ops.broadcast_to(
              math_ops.range(batch_size)[:, None], perm_reshaped.shape),
          dtype=output_idx_type)
      permuted_verification_reshaped = array_ops.gather_nd(
          verification_reshaped,
          array_ops.stack([batch_indices, inv_perm_reshaped], axis=-1))

      # Reshape the verification matrix back to the original shape.
      verification = array_ops.reshape(permuted_verification_reshaped,
                                       lu_shape)

    self._verifyLuBase(x, lower, upper, perm, verification,
                       output_idx_type)
Developer: adit-chandra, Project: tensorflow, Lines: 58, Source: lu_op_test.py


Example 16: testParamsRankLargerThanIndexIndexScalarSlices

 def testParamsRankLargerThanIndexIndexScalarSlices(self):
   with self.session(use_gpu=True):
     params = np.array(
         [[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]], dtype=np.float32).T
     indices = constant_op.constant([4])
     gather_nd_t = array_ops.gather_nd(params, indices)
     gather_nd_val = gather_nd_t.eval()
     self.assertEqual([2], gather_nd_t.get_shape())
     self.assertAllEqual(np.array([-7, 7]), gather_nd_val)
Developer: abhinav-upadhyay, Project: tensorflow, Lines: 9, Source: gather_nd_op_test.py


Example 17: _testSimpleDtype

  def _testSimpleDtype(self, dtype):
    with self.cached_session(use_gpu=True):
      params = constant_op.constant(np.array([8, 1, 2, 3, 7, 5], dtype=dtype))
      indices = constant_op.constant([[4], [4], [0]])
      gather_nd_t = array_ops.gather_nd(params, indices)
      gather_nd_val = gather_nd_t.eval()

    self.assertAllEqual(np.array([7, 7, 8], dtype=dtype), gather_nd_val)
    self.assertEqual([3], gather_nd_t.get_shape())
Developer: abhinav-upadhyay, Project: tensorflow, Lines: 9, Source: gather_nd_op_test.py


Example 18: testBadIndicesCPU

 def testBadIndicesCPU(self):
   with self.test_session(use_gpu=False):
     params = [0, 1, 2]
     indices = [[[0], [7]]]  # Make this one higher rank
     gather_nd = array_ops.gather_nd(params, indices)
     with self.assertRaisesOpError(
         r"flat indices\[1, :\] = \[7\] does not index into param "
         r"\(shape: \[3\]\)"):
       gather_nd.eval()
Developer: Huoxubeiyin, Project: tensorflow, Lines: 9, Source: gather_nd_op_test.py


Example 19: testBadIndicesWithSlices

 def testBadIndicesWithSlices(self):
   with self.test_session():
     params = [[0, 1, 2]]
     indices = [[[0], [0], [1]]]  # Make this one higher rank
     gather_nd = array_ops.gather_nd(params, indices)
     with self.assertRaisesOpError(
         r"flat indices\[2, :\] = \[1\] does not index into param "
         r"\(shape: \[1,3\]\)"):
       gather_nd.eval()
Developer: AlbertXiebnu, Project: tensorflow, Lines: 9, Source: gather_nd_op_test.py


Example 20: _SparseReduceSumGrad

def _SparseReduceSumGrad(op, out_grad):
    """Similar to gradient for the Sum Op (i.e. tf.reduce_sum())."""
    sp_indices = op.inputs[0]
    sp_shape = op.inputs[2]
    output_shape_kept_dims = math_ops.reduced_shape(sp_shape, op.inputs[3])
    out_grad_reshaped = array_ops.reshape(out_grad, output_shape_kept_dims)
    scale = sp_shape // math_ops.to_int64(output_shape_kept_dims)
    # (sparse_indices, sparse_values, sparse_shape, reduction_axes)
    return (None, array_ops.gather_nd(out_grad_reshaped, sp_indices // scale), None, None)
Developer: 285219011, Project: liuwenfeng, Lines: 9, Source: sparse_grad.py



Note: The tensorflow.python.ops.array_ops.gather_nd examples in this article were compiled by 纯净天空 from source-code and documentation hosts such as GitHub and MSDocs. The snippets are selected from open-source projects; copyright remains with the original authors, and any distribution or use should follow the corresponding project's License. Do not reproduce without permission.

