Python state_ops.scatter_sub Function Code Examples


This article collects and summarizes typical usage examples of the tensorflow.python.ops.state_ops.scatter_sub function in Python. If you have been wondering what exactly scatter_sub does, how to call it, or what real code that uses it looks like, the curated examples below should help.



Ten code examples of the scatter_sub function are shown below, sorted by popularity by default.
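
First, a minimal, self-contained sketch of the basic call (a hypothetical snippet assuming a TensorFlow 1.x graph/session environment; it is not taken from any of the projects below). state_ops.scatter_sub(ref, indices, updates) subtracts updates from the rows of the mutable variable ref selected by indices, in place, and returns a tensor holding the variable's new value:

 import tensorflow as tf
 from tensorflow.python.ops import state_ops

 # Subtract 10.0 at index 0 and 20.0 at index 2 of a 1-D variable.
 var = tf.Variable([1.0, 2.0, 3.0, 4.0])
 update = state_ops.scatter_sub(var, [0, 2], [10.0, 20.0])

 with tf.Session() as sess:
     sess.run(tf.global_variables_initializer())
     print(sess.run(update))  # [-9.0, 2.0, -17.0, 4.0]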

Example 1: _apply_sparse

 def _apply_sparse(self, grad, var):
   if len(grad.indices.get_shape()) == 1:
     grad_indices = grad.indices
     grad_values = grad.values
   else:
     grad_indices = array_ops.reshape(grad.indices, [-1])
     grad_values = array_ops.reshape(grad.values, [-1, grad.values.get_shape()[-1].value])
   gidxs, metagidxs = array_ops.unique(grad_indices)
   sizegidxs = array_ops.size(gidxs)
   gvals = math_ops.unsorted_segment_sum(grad_values, metagidxs, sizegidxs)
   # m_t = mu * m + (1 - mu) * g_t
   m = self.get_slot(var, "m")
   m_scaled_g_values = gvals * (1 - self._mu_t)
   m_t = state_ops.scatter_update(m, gidxs,
                                  array_ops.gather(m, gidxs) * self._mu_t,
                                  use_locking=self._use_locking)
   m_t = state_ops.scatter_add(m_t, gidxs, m_scaled_g_values,
                               use_locking=self._use_locking)
   m_t_ = array_ops.gather(m_t, gidxs) / (1 - self._mu2_t * self._mu_power)
   # m_bar = mu * m_t + (1 - mu) * g_t
   m_bar = self._mu2_t * m_t_ + m_scaled_g_values / (1 - self._mu_power)
   var_update = state_ops.scatter_sub(var, gidxs,
                                      self._lr_t * m_bar,
                                      use_locking=self._use_locking)
   return control_flow_ops.group(*[var_update, m_t])
Developer: MarvinBertin, Project: TensorFlow-Algorithms, Lines: 25, Source: nesterov.py


Example 2: _apply_sparse

    def _apply_sparse(self, grad, var):
        lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
        beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
        beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
        epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)

        # the following equations are given in [1]
        # m_t = beta1 * m + (1 - beta1) * g_t
        m = self.get_slot(var, "m")
        m_t = state_ops.scatter_update(m, grad.indices,
                                       beta1_t * array_ops.gather(m, grad.indices) +
                                       (1. - beta1_t) * grad.values,
                                       use_locking=self._use_locking)
        m_t_slice = tf.gather(m_t, grad.indices)

        # v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
        v = self.get_slot(var, "v")
        v_t = state_ops.scatter_update(v, grad.indices,
                                       beta2_t * array_ops.gather(v, grad.indices) +
                                       (1. - beta2_t) * tf.square(grad.values),
                                       use_locking=self._use_locking)
        v_prime = self.get_slot(var, "v_prime")
        v_t_slice = tf.gather(v_t, grad.indices)
        v_prime_slice = tf.gather(v_prime, grad.indices)
        v_t_prime = state_ops.scatter_update(v_prime, grad.indices, tf.maximum(v_prime_slice, v_t_slice))

        v_t_prime_slice = array_ops.gather(v_t_prime, grad.indices)
        var_update = state_ops.scatter_sub(var, grad.indices,
                                           lr_t * m_t_slice / (math_ops.sqrt(v_t_prime_slice) + epsilon_t),
                                           use_locking=self._use_locking)

        return control_flow_ops.group(*[var_update, m_t, v_t, v_t_prime])
Developer: jkhlot, Project: tensorflow-XNN, Lines: 32, Source: optimizer.py


Example 3: _apply_sparse

  def _apply_sparse(self, grad, var):
    beta1_power, beta2_power = self._get_beta_accumulators()
    beta1_power = math_ops.cast(beta1_power, var.dtype.base_dtype)
    beta2_power = math_ops.cast(beta2_power, var.dtype.base_dtype)
    lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
    beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
    beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
    epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)
    lr = (lr_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power))

    # m := beta1 * m + (1 - beta1) * g_t
    m = self.get_slot(var, "m")
    m_t = state_ops.scatter_update(m, grad.indices,
                                   beta1_t * array_ops.gather(m, grad.indices) +
                                   (1 - beta1_t) * grad.values,
                                   use_locking=self._use_locking)

    # v := beta2 * v + (1 - beta2) * (g_t * g_t)
    v = self.get_slot(var, "v")
    v_t = state_ops.scatter_update(v, grad.indices,
                                   beta2_t * array_ops.gather(v, grad.indices) +
                                   (1 - beta2_t) * math_ops.square(grad.values),
                                   use_locking=self._use_locking)

    # variable -= learning_rate * m_t / (epsilon_t + sqrt(v_t))
    m_t_slice = array_ops.gather(m_t, grad.indices)
    v_t_slice = array_ops.gather(v_t, grad.indices)
    denominator_slice = math_ops.sqrt(v_t_slice) + epsilon_t
    var_update = state_ops.scatter_sub(var, grad.indices,
                                       lr * m_t_slice / denominator_slice,
                                       use_locking=self._use_locking)
    return control_flow_ops.group(var_update, m_t, v_t)
Developer: AndrewTwinz, Project: tensorflow, Lines: 32, Source: lazy_adam_optimizer.py


Example 4: _apply_sparse

  def _apply_sparse(self, grad, var):
    lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
    alpha_t = math_ops.cast(self._alpha_t, var.dtype.base_dtype)
    beta_t = math_ops.cast(self._beta_t, var.dtype.base_dtype)

    m = self.get_slot(var, 'm')
    m_t = state_ops.assign(
        m, (m * beta_t) + (grad * (1 - beta_t)), use_locking=self._use_locking)

    sign_g = ops.IndexedSlices(
        math_ops.sign(grad.values), grad.indices, dense_shape=grad.dense_shape)
    sign_gm = ops.IndexedSlices(
        array_ops.gather(math_ops.sign(m_t), sign_g.indices) * sign_g.values,
        sign_g.indices,
        dense_shape=sign_g.dense_shape)

    sign_decayed = math_ops.cast(
        self._sign_decay_t, var.dtype.base_dtype)
    multiplier_values = alpha_t + sign_decayed * sign_gm.values
    multiplier = ops.IndexedSlices(
        multiplier_values, sign_gm.indices, dense_shape=sign_gm.dense_shape)

    final_update = ops.IndexedSlices(
        lr_t * multiplier.values * grad.values,
        multiplier.indices,
        dense_shape=multiplier.dense_shape)

    var_update = state_ops.scatter_sub(
        var,
        final_update.indices,
        final_update.values,
        use_locking=self._use_locking)

    return control_flow_ops.group(*[var_update, m_t])
Developer: Ajaycs99, Project: tensorflow, Lines: 34, Source: addsign.py


Example 5: _apply_sparse

  def _apply_sparse(self, grad, var):

    max_learning_rate = array_ops.where(self._counter < self._burnin,
                                        self._burnin_max_learning_rate,
                                        self._max_learning_rate)

    learn_rate = clip_ops.clip_by_value(
        self._get_coordinatewise_learning_rate(grad, var), 0.0,
        math_ops.cast(max_learning_rate, var.dtype))
    delta = grad.values * learn_rate

    return state_ops.scatter_sub(var, grad.indices, delta,
                                 use_locking=self._use_locking)
Developer: AnddyWang, Project: tensorflow, Lines: 13, Source: variational_sgd_optimizer.py


Example 6: _apply_sparse

  def _apply_sparse(self, grad, var):
    beta1_power = math_ops.cast(self._beta1_power, var.dtype.base_dtype)
    beta2_power = math_ops.cast(self._beta2_power, var.dtype.base_dtype)
    lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
    beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
    beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
    epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)
    lr = (lr_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power))

    # m := beta1 * m + (1 - beta1) * g_t
    # We use a slightly different version of the moving-average update formula
    # that does a better job of handling concurrent lockless updates:
    # m -= (1 - beta1) * (m - g_t)
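    # (Expanding: m - (1 - beta1) * (m - g_t) = beta1 * m + (1 - beta1) * g_t,
    # so the two forms agree; expressing the update as a single scatter_sub
    # lets concurrent lockless updates accumulate instead of overwriting.)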
    m = self.get_slot(var, "m")
    m_t_delta = array_ops.gather(m, grad.indices) - grad.values
    m_t = state_ops.scatter_sub(m, grad.indices,
                                (1 - beta1_t) * m_t_delta,
                                use_locking=self._use_locking)

    # v := beta2 * v + (1 - beta2) * (g_t * g_t)
    # We reformulate the update as:
    # v -= (1 - beta2) * (v - g_t * g_t)
    v = self.get_slot(var, "v")
    v_t_delta = array_ops.gather(v, grad.indices) - math_ops.square(grad.values)
    v_t = state_ops.scatter_sub(v, grad.indices,
                                (1 - beta2_t) * v_t_delta,
                                use_locking=self._use_locking)

    # variable -= learning_rate * m_t / (epsilon_t + sqrt(v_t))
    m_t_slice = array_ops.gather(m_t, grad.indices)
    v_t_slice = array_ops.gather(v_t, grad.indices)
    denominator_slice = math_ops.sqrt(v_t_slice) + epsilon_t
    var_update = state_ops.scatter_sub(var, grad.indices,
                                       lr * m_t_slice / denominator_slice,
                                       use_locking=self._use_locking)
    return control_flow_ops.group(var_update, m_t, v_t)
Developer: Immexxx, Project: tensorflow, Lines: 36, Source: lazy_adam_optimizer.py


Example 7: scatter_sub

    def scatter_sub(self, sparse_delta, use_locking=False):
        """Subtracts `IndexedSlices` from this variable.

        This is essentially a shortcut for `scatter_sub(self, sparse_delta.indices,
        sparse_delta.values)`.

        Args:
          sparse_delta: `IndexedSlices` to be subtracted from this variable.
          use_locking: If `True`, use locking during the operation.

        Returns:
          A `Tensor` that will hold the new value of this variable after
          the scattered subtraction has completed.

        Raises:
          ValueError: if `sparse_delta` is not an `IndexedSlices`.
        """
        if not isinstance(sparse_delta, ops.IndexedSlices):
            raise ValueError("sparse_delta is not IndexedSlices: %s" % sparse_delta)
        return state_ops.scatter_sub(self._variable, sparse_delta.indices,
                                     sparse_delta.values, use_locking=use_locking)
Developer: shakamunyi, Project: tensorflow, Lines: 20, Source: variables.py
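
As a usage note for Example 7, here is a small hypothetical sketch (again assuming a TensorFlow 1.x session; it is not from the variables.py source). The IndexedSlices argument simply bundles the indices and values that the free-function form of scatter_sub takes separately:

 import tensorflow as tf

 v = tf.Variable([1.0, 2.0, 3.0])
 delta = tf.IndexedSlices(values=tf.constant([0.5, 1.5]),
                          indices=tf.constant([0, 2]))
 update = v.scatter_sub(delta)  # dispatches to state_ops.scatter_sub

 with tf.Session() as sess:
     sess.run(tf.global_variables_initializer())
     print(sess.run(update))  # [0.5, 2.0, 1.5]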


Example 8: testScatterSubStateOps

 def testScatterSubStateOps(self):
   with context.eager_mode():
     v = resource_variable_ops.ResourceVariable([1.0, 2.0], name="sub")
     state_ops.scatter_sub(v, [1], [3])
     self.assertAllEqual([1.0, -1.0], v.numpy())
Developer: aeverall, Project: tensorflow, Lines: 5, Source: resource_variable_ops_test.py


Example 9: _finish

 def _finish(self, update_ops, name_scope):
   """"""
   
   caches = [update_op[0] for update_op in update_ops]
   update_ops = [update_op[1:] for update_op in update_ops]
   if self._noise is not None:
     for cache in caches:
       s_t, x_tm1 = cache[:2]
       s_t += random_ops.random_normal(x_tm1.initialized_value().get_shape(), stddev=self._noise)
       cache[0] = s_t
   
   if self._clip is not None:
     S_t = [cache[0] for cache in caches]
     S_t, _ = clip_ops.clip_by_global_norm(S_t, self._clip)
     for cache, s_t in zip(caches, S_t):
       cache[0] = s_t
   
   new_update_ops = []
   for cache, update_op in zip(caches, update_ops):
     if len(cache) == 3:
       s_t, x_tm1 = cache[:2]
       with ops.name_scope('update_' + x_tm1.op.name), ops.device(x_tm1.device):
         x_t = state_ops.assign_sub(x_tm1, s_t, use_locking=self._use_locking)
         cache.append(x_t)
     else:
       s_t_, x_tm1, idxs = cache[:3]
       with ops.name_scope('update_' + x_tm1.op.name), ops.device(x_tm1.device):
         x_t = state_ops.scatter_sub(x_tm1, idxs, s_t_, use_locking=self._use_locking)
         cache.append(x_t)
     new_update_ops.append(control_flow_ops.group(*([x_t] + update_op)))
   
   with ops.control_dependencies(new_update_ops):
     more_update_ops = []
     if self._save_step:
       for cache in caches:
         if len(cache) == 4:
           s_t, x_tm1 = cache[:2]
           s_tm1 = self.get_slot(x_tm1, 's')
           with ops.name_scope('update_' + x_tm1.op.name), ops.device(x_tm1.device):
             new_step_and_grads = []
             s_t = state_ops.assign(s_tm1, -s_t, use_locking=self._use_locking)
         else:
           s_t_, x_tm1, idxs = cache[:3]
           s_tm1 = self.get_slot(x_tm1, 's')
           with ops.name_scope('update_' + x_tm1.op.name), ops.device(x_tm1.device):
             s_t = state_ops.scatter_update(s_tm1, idxs, -s_t_, use_locking=self._use_locking)
         more_update_ops.append(s_t)
     if self._save_grad:
       for cache in caches:
         if len(cache) == 4:
           x_tm1, g_t = cache[1:3]
           g_tm1 = self.get_slot(x_tm1, 'g')
           with ops.name_scope('update_' + x_tm1.op.name), ops.device(x_tm1.device):
             new_step_and_grads = []
             g_t = state_ops.assign(g_tm1, g_t, use_locking=self._use_locking)
         else:
           x_tm1, idxs, g_t_ = cache[1:4]
           g_tm1 = self.get_slot(x_tm1, 'g')
           with ops.name_scope('update_' + x_tm1.op.name), ops.device(x_tm1.device):
             g_t = state_ops.scatter_update(g_tm1, idxs, g_t_, use_locking=self._use_locking)
         more_update_ops.append(g_t)
     
     if self._chi > 0:
       for cache in caches:
         if len(cache) == 4:
           _, x_tm1, _, x_t = cache
           with ops.name_scope('update_' + x_tm1.op.name), ops.device(x_tm1.device):
             x_and_t = self._dense_moving_average(x_tm1, x_t, 'x', self._chi)
             more_update_ops.append(control_flow_ops.group(*x_and_t))
         else:
           _, x_tm1, idxs, _, x_t = cache
           with ops.name_scope('update_' + x_tm1.op.name), ops.device(x_tm1.device):
             x_t_ = array_ops.gather(x_t, idxs)
             x_and_t = self._sparse_moving_average(x_tm1, idxs, x_t_, 'x', self._chi)
             more_update_ops.append(control_flow_ops.group(*x_and_t))
   
   return control_flow_ops.group(*(new_update_ops + more_update_ops), name=name_scope)
Developer: tdozat, Project: Optimization, Lines: 77, Source: optimizers.py


Example 10: _scatter_sub

 def _scatter_sub(self, x, i, v):
   return state_ops.scatter_sub(
       x, i, v, use_locking=self._use_locking)
Developer: clsung, Project: tensorflow, Lines: 3, Source: lazy_adam_optimizer.py



Note: The tensorflow.python.ops.state_ops.scatter_sub examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their original developers, and copyright remains with those authors; please follow the corresponding project's license when redistributing or using the code, and do not republish without permission.

