
Python gradients.gradients Function Code Examples


This article collects typical usage examples of the gradients function from the Python module tensorflow.python.ops.gradients. If you have been wondering what exactly gradients does, how to call it, or what it looks like in real code, the curated examples below should help.



The following presents 20 code examples of the gradients function, ordered by popularity by default.
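
Before the examples, here is a minimal sketch of the basic call, assuming a TensorFlow 1.x-style graph-mode environment (the same internal modules the examples below import); the tensor values are illustrative:

 # gradients.gradients(ys, xs) returns a list of tensors, one per x in xs,
 # each holding the summed d(ys)/dx. A sketch, assuming TF 1.x graph mode.
 from tensorflow.python.client import session
 from tensorflow.python.framework import constant_op, ops
 from tensorflow.python.ops import gradients, math_ops

 with ops.Graph().as_default():
   x = constant_op.constant(3.0)
   y = math_ops.square(x)                  # y = x**2
   dy_dx = gradients.gradients(y, x)[0]    # dy/dx = 2*x
   with session.Session() as sess:
     print(sess.run(dy_dx))                # 6.0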

Example 1: testUnknownUnconnectedGradientsValueGiven

 def testUnknownUnconnectedGradientsValueGiven(self):
   with ops.Graph().as_default():
     x = constant(1.0)
     y = constant(1.0)
     with self.assertRaisesRegexp(
         ValueError, "Unknown value for unconnected_gradients: 'nonsense'"):
       gradients.gradients([y], [x], unconnected_gradients="nonsense")
Developer: JonathanRaiman | Project: tensorflow | Lines: 7 | Source: gradients_test.py


Example 2: testRealOnly

 def testRealOnly(self):
   x = constant_op.constant(7+3j, dtype=dtypes.complex64)
   y = math_ops.square(x)
   with self.assertRaisesRegexp(
       TypeError,
       r"Gradients of complex tensors must set grad_ys "
       r"\(y\.dtype = tf\.complex64\)"):
     gradients.gradients(y, x)
Developer: didukhle | Project: tensorflow | Lines: 8 | Source: gradients_test.py


Example 3: testPartialDerivatives

 def testPartialDerivatives(self):
   with self.test_session():
     x = constant_op.constant(1.)
     y = 2 * x
     z = x + y
     totalg = gradients.gradients(z, [x, y])
     self.assertEqual([3.0, 1.0], [g.eval() for g in totalg])
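     # With stop_gradients=[x, y], y is treated as a constant rather than as
     # 2*x, so the partial dz/dx is 1.0 instead of the total derivative 3.0.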
     partialg = gradients.gradients(z, [x, y], stop_gradients=[x, y])
     self.assertEqual([1.0, 1.0], [g.eval() for g in partialg])
Developer: didukhle | Project: tensorflow | Lines: 9 | Source: gradients_test.py


Example 4: testFloorDivGrad

 def testFloorDivGrad(self):
     with self.test_session():
         a = variables.Variable(2.0)
         b = variables.Variable(4.0)
         with self.test_session() as sess:
             sess.run(variables.initialize_all_variables())
             c_grad = gradients.gradients(math_ops.div_deprecated(a, b), [a, b])
             self.assertAllEqual([x.eval() for x in c_grad], [0.25, -0.125])
             c_grad = gradients.gradients(math_ops.div(a, b), [a, b])
             self.assertAllEqual([x.eval() for x in c_grad], [0.25, -0.125])
             c_grad = gradients.gradients(math_ops.floordiv(a, b), [a, b])
             self.assertAllEqual([None if x is None else x.eval() for x in c_grad], [None, None])
Developer: yuikns | Project: tensorflow | Lines: 12 | Source: math_ops_test.py


Example 5: testFloorDivGrad

 def testFloorDivGrad(self):
   with self.test_session():
     a = variables.Variable(2.)
     b = variables.Variable(4.)
     with self.test_session() as sess:
       sess.run(variables.global_variables_initializer())
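       # Expected values: d(a/b)/da = 1/b = 0.25 and d(a/b)/db = -a/b**2 = -0.125.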
       c_grad = gradients.gradients(math_ops.divide(a, b), [a, b])
       self.assertAllEqual([x.eval() for x in c_grad], [.25, -.125])
       c_grad = gradients.gradients(math_ops.div(a, b), [a, b])
       self.assertAllEqual([x.eval() for x in c_grad], [.25, -.125])
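       # floordiv is piecewise constant in its inputs, so its gradient is
       # defined as None; gradients.gradients returns None for both a and b.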
       c_grad = gradients.gradients(math_ops.floordiv(a, b), [a, b])
       self.assertAllEqual([None if x is None else x.eval()
                            for x in c_grad], [None, None])
Developer: LongJun123456 | Project: tensorflow | Lines: 13 | Source: math_ops_test.py


Example 6: testDependentYs

 def testDependentYs(self):
   with self.test_session():
     x = constant_op.constant(3.0)
     y = math_ops.square(x)
     y1 = math_ops.square(y)
     y2 = math_ops.square(y1)
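     # With x = 3: y = x**2, y1 = x**4, y2 = x**8, so
     # d(y + y2)/dx = 2*x + 8*x**7 = 6 + 17496 = 17502.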
     g = gradients.gradients([y, y2], x)
     self.assertAllClose(17502.0, g[0].eval())
     g = gradients.gradients(y + y2, x)
     self.assertAllClose(17502.0, g[0].eval())
     z = array_ops.identity(y)
     z2 = array_ops.identity(y2)
     g = gradients.gradients([z, z2], x)
     self.assertAllClose(17502.0, g[0].eval())
Developer: didukhle | Project: tensorflow | Lines: 14 | Source: gradients_test.py


Example 7: test_jacobian_fixed_shape

 def test_jacobian_fixed_shape(self):
   x = random_ops.random_uniform([2, 2])
   y = math_ops.matmul(x, x, transpose_a=True)
   jacobian_pfor = gradients.jacobian(y, x, use_pfor=True)
   jacobian_while = gradients.jacobian(y, x, use_pfor=False)
   answer = ops.convert_to_tensor([[
       gradient_ops.gradients(y[0][0], x)[0],
       gradient_ops.gradients(y[0][1], x)[0]
   ], [
       gradient_ops.gradients(y[1][0], x)[0],
       gradient_ops.gradients(y[1][1], x)[0]
   ]])
   self.run_and_assert_equal(answer, jacobian_pfor)
   self.run_and_assert_equal(answer, jacobian_while)
Developer: LongJun123456 | Project: tensorflow | Lines: 14 | Source: gradients_test.py


Example 8: testColocateGradientsWithAggregation

 def testColocateGradientsWithAggregation(self):
   with ops.Graph().as_default() as g:
     with g.device("/gpu:1"):
       w = constant(1.0, shape=[1, 1])
     x = constant(1.0, shape=[1, 2])
     y = constant(1.0, shape=[1, 2])
     wx = math_ops.matmul(w, x)
     wy = math_ops.matmul(w, y)
     with g.device("/gpu:0"):
       z = wx + wy
     gw1 = gradients.gradients(z, [w], colocate_gradients_with_ops=True)[0]
     self.assertEquals("/gpu:1", gw1.device)
     gw2 = gradients.gradients(z, [w], colocate_gradients_with_ops=False)[0]
     self.assertEquals(None, gw2.device)
Developer: G3ntIeMan | Project: tensorflow | Lines: 14 | Source: gradients_test.py


Example 9: testCustomGradientErrors

  def testCustomGradientErrors(self):

    @custom_gradient.custom_gradient
    def F(x):

      def Grad(_):
        raise RuntimeError("x")

      return x, Grad

    with ops.Graph().as_default():
      x = constant(1.0)
      y = F(x)
      with self.assertRaises(RuntimeError):
        gradients.gradients(y, x)
Developer: didukhle | Project: tensorflow | Lines: 15 | Source: gradients_test.py


Example 10: loop_fn

 def loop_fn(i):
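   # NOTE: images, labels, model, and training are free variables here; in the
   # original test they are defined in the enclosing function, and loop_fn is
   # presumably the per-example body of a parallel-for (pfor) construct.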
   image = array_ops.gather(images, i)
   label = array_ops.gather(labels, i)
   logits = array_ops.reshape(model(image, training=training), [-1])
   loss = losses.softmax_cross_entropy(
       logits=logits, onehot_labels=label, reduction=losses.Reduction.NONE)
   return gradient_ops.gradients(loss, variables.trainable_variables())
Developer: LongJun123456 | Project: tensorflow | Lines: 7 | Source: gradients_test.py


Example 11: testCustomGradientWithVariables

  def testCustomGradientWithVariables(self):

    @custom_gradient.custom_gradient
    def F(x):
      out = core_layers.dense(x, 3, use_bias=False)

      def Grad(out_grad, variables=None):  # pylint: disable=redefined-outer-name
        self.assertEqual(1, len(variables))
        grads = gradients.gradients(out, [x, variables[0]], grad_ys=out_grad)
        return grads[0], [array_ops.ones((4, 3))]

      return out, Grad

    with ops.Graph().as_default():
      x = array_ops.ones((2, 4))
      with variable_scope.variable_scope("f", use_resource=True) as vs:
        y = F(x)
        all_vars = vs.global_variables()
        assert len(all_vars) == 1
      grads = gradients.gradients(y, [x, all_vars[0]])
      for g in grads:
        self.assertTrue(g is not None)
      with session.Session() as sess:
        sess.run(variables.global_variables_initializer())
        dw = sess.run(math_ops.reduce_sum(grads[1]))
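        # The custom Grad returned array_ops.ones((4, 3)) as the variable
        # gradient, so its sum is 4 * 3 = 12.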
        self.assertEqual(12., dw)
Developer: didukhle | Project: tensorflow | Lines: 26 | Source: gradients_test.py


Example 12: approximate_hessian

 def approximate_hessian(self, grads_and_vars, name=None):
   """
   I haven't tested this yet so I have no idea if it works, but even if it
   does it's probably super slow, and either way nothing else has been modified
   to deal with it.
   """
   
   gv = 0
   var_refs = []
   for g_t, x_tm1 in grads_and_vars:
     var_refs.append(x_tm1.ref())
     if g_t is None:
       continue
     with ops.name_scope('update_' + x_tm1.op.name), ops.device(x_tm1.device):
       if isinstance(g_t, ops.Tensor):
         gv += math_ops.reduce_sum(g_t * random_ops.random_normal(g_t.get_shape()))
       else:
         idxs, idxs_ = array_ops.unique(g_t.indices)
         g_t_ = math_ops.unsorted_segment_sum(g_t.values, idxs_, array_ops.size(idxs))
         gv += math_ops.reduce_sum(g_t_ * random_ops.random_normal(g_t_.get_shape()))
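    # NOTE: gate_gradients, aggregation_method, and colocate_gradients_with_ops
    # are not defined in this method's scope, so as written the call below would
    # raise NameError; they are presumably meant to come from the optimizer's
    # configuration (compare compute_gradients in Example 19).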
   hesses = gradients.gradients(gv, var_refs,
                                gate_gradients=(gate_gradients == Optimizer.GATE_OP),
                                aggregation_method=aggregation_method,
                                colocate_gradients_with_ops=colocate_gradients_with_ops)
   return zip([g_t for g_t, _ in grads_and_vars], [x_tm1 for _, x_tm1 in grads_and_vars], hesses)
Developer: tdozat | Project: Optimization | Lines: 25 | Source: optimizers.py


Example 13: get_gradients

  def get_gradients(self, loss, params):
    """Returns gradients of `loss` with respect to `params`.

    Arguments:
      loss: Loss tensor.
      params: List of variables.

    Returns:
      List of gradient tensors.

    Raises:
      ValueError: In case any gradient cannot be computed (e.g. if gradient
        function not implemented).
    """
    loss = self._scale_loss(loss)
    grads = gradients.gradients(loss, params)
    if None in grads:
      raise ValueError("An operation has `None` for gradient. "
                       "Please make sure that all of your ops have a "
                       "gradient defined (i.e. are differentiable). "
                       "Common ops without gradient: "
                       "K.argmax, K.round, K.eval.")
    if hasattr(self, "clipnorm"):
      grads = [clip_ops.clip_by_norm(g, self.clipnorm) for g in grads]
    if hasattr(self, "clipvalue"):
      grads = [
          clip_ops.clip_by_value(g, -self.clipvalue, self.clipvalue)
          for g in grads
      ]
    return grads
Developer: terrytangyuan | Project: tensorflow | Lines: 30 | Source: optimizer_v2.py
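
As a self-contained illustration of the clipnorm step in get_gradients above, here is a minimal sketch, assuming TF 1.x-style graph mode; the loss and the clip norm of 1.0 are illustrative:

 from tensorflow.python.client import session
 from tensorflow.python.framework import ops
 from tensorflow.python.ops import clip_ops, gradients, math_ops, variables

 with ops.Graph().as_default():
   w = variables.Variable([3.0, 4.0])
   loss = math_ops.reduce_sum(math_ops.square(w))  # gradient is 2*w = [6, 8]
   grads = gradients.gradients(loss, [w])          # L2 norm of the gradient is 10
   clipped = [clip_ops.clip_by_norm(g, 1.0) for g in grads]
   with session.Session() as sess:
     sess.run(variables.global_variables_initializer())
     print(sess.run(clipped))                      # [[0.6, 0.8]], rescaled to norm 1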


Example 14: test_zero_grad_tf_gradients

  def test_zero_grad_tf_gradients(self):
    if context.executing_eagerly():
      self.skipTest("tf.gradients not supported in eager.")

    x = constant_op.constant([-1., 0., 1.])
    g = self.evaluate(gradients.gradients(math_ops.pow(x, 2), x)[0])
    self.assertAllClose([-2., 0., 2.], g)
Developer: adit-chandra | Project: tensorflow | Lines: 7 | Source: math_grad_test.py


Example 15: testColocateGradientsWithAggregation

  def testColocateGradientsWithAggregation(self):
    with ops.Graph().as_default() as g:
      with g.device("/device:GPU:1"):
        w = constant(1.0, shape=[1, 1])
      x = constant(1.0, shape=[1, 2])
      y = constant(1.0, shape=[1, 2])
      wx = math_ops.matmul(w, x)
      wy = math_ops.matmul(w, y)
      with g.device("/device:GPU:0"):
        z = wx + wy

      gw1 = gradients.gradients(z, [w], colocate_gradients_with_ops=True)[0]
      self.assertEqual(gw1.op.colocation_groups(), wx.op.colocation_groups())

      gw2 = gradients.gradients(z, [w], colocate_gradients_with_ops=False)[0]
      self.assertTrue(wx.op.colocation_groups() != gw2.op.colocation_groups())
Developer: didukhle | Project: tensorflow | Lines: 16 | Source: gradients_test.py


Example 16: testAggregateGradients

  def testAggregateGradients(self):

    def fn(x):
      ind1 = tensor.Tensor(np.array([0, 1]))
      ind2 = tensor.Tensor(np.array([2, 3]))
      ind3 = tensor.Tensor(np.array([1, 3]))
      # A mixture of IndexedSlices and dense tensor to aggregate.
      g1 = embedding_ops.embedding_lookup(x, ind1)
      g2 = embedding_ops.embedding_lookup(x, ind2)
      g3 = embedding_ops.embedding_lookup(x, ind3)
      g4 = math_ops.reduce_sum(x * tensor.Tensor(2.0))
      return g1 * g2 * g3 * g4

    var_np = np.random.rand(4, 2).astype(np.float32)
    var = tensor.Tensor(var_np)
    grad = backprop.gradients_function(fn, [0])(var)[0]

    with context.graph_mode(), self.test_session():
      tf_var = array_ops.constant(var_np, dtypes.float32)
      tf_ind1 = array_ops.constant([0, 1])
      tf_ind2 = array_ops.constant([2, 3])
      tf_ind3 = array_ops.constant([1, 3])
      tf_g1 = embedding_ops.embedding_lookup(tf_var, tf_ind1)
      tf_g2 = embedding_ops.embedding_lookup(tf_var, tf_ind2)
      tf_g3 = embedding_ops.embedding_lookup(tf_var, tf_ind3)
      tf_g4 = math_ops.reduce_sum(tf_var * 2.0, reduction_indices=(0, 1))
      tf_y = tf_g1 * tf_g2 * tf_g3 * tf_g4
      tf_grad = gradients.gradients(tf_y, [tf_var])[0]

      tf_dense_grad = math_ops.unsorted_segment_sum(
          tf_grad.values, tf_grad.indices, tf_grad.dense_shape[0])

      self.assertAllClose(grad.numpy(), tf_dense_grad.eval())
Developer: chdinh | Project: tensorflow | Lines: 33 | Source: backprop_test.py


Example 17: testUnconnectedGradientsNoneUnconnectedGradients

 def testUnconnectedGradientsNoneUnconnectedGradients(self):
   with ops.Graph().as_default():
     x = constant(1.0, shape=[2, 2])
     y = constant(3.0, shape=[3, 1])
     grad = gradients.gradients(
         [y], [x], unconnected_gradients="none")
   self.assertIsNone(grad[0])
Developer: JonathanRaiman | Project: tensorflow | Lines: 7 | Source: gradients_test.py


Example 18: testPlaysNicelyWithDefunSeparateGradientScope

  def testPlaysNicelyWithDefunSeparateGradientScope(self):
    with self.test_session(graph=ops.Graph()) as sess:
      with jit.experimental_jit_scope(True):

        @function.Defun(
            compiled=True, noinline=True, separate_compiled_gradients=True)
        def mulop(x1, x2):
          return x1 * x2

        x = constant_op.constant(1.0)
        r = mulop(x, x)
        g_r = gradients.gradients(r, x, name="GA")[0]

      # Ensure the forward function is compiled.
      graph_def = r.graph.as_graph_def()
      func_attrs = graph_def.library.function[0].attr
      self.assertTrue(func_attrs["_XlaCompile"].b)
      self.assertEqual(b"jit_scope_0", func_attrs["_XlaScope"].s)

      # Ensure the gradient (SymbolicGradient) is compiled, with a different
      # _XlaScope from the function itself.
      grad_op = g_r.op.inputs[0].op
      self.assertTrue(grad_op.get_attr("_XlaCompile"))
      self.assertEqual(b"jit_scope_0_grad_GA",
                       grad_op.get_attr("_XlaScope"))

      # Ensure the ops run: grad(x1*x1) = 2*x1
      self.assertAllClose([1.0, 1.0, 2.0], sess.run([x, r, g_r]))
Developer: 1000sprites | Project: tensorflow | Lines: 28 | Source: jit_test.py


Example 19: compute_gradients

  def compute_gradients(self, loss, var_list=None,
                        gate_gradients=GATE_OP,
                        aggregation_method=None,
                        colocate_gradients_with_ops=False,
                        grad_loss=None):
    """Compute gradients of `loss` for the variables in `var_list`.

    This is the first part of `minimize()`.  It returns a list
    of (gradient, variable) pairs where "gradient" is the gradient
    for "variable".  Note that "gradient" can be a `Tensor`, an
    `IndexedSlices`, or `None` if there is no gradient for the
    given variable.

    Args:
      loss: A Tensor containing the value to minimize.
      var_list: Optional list of `tf.Variable` to update to minimize
        `loss`.  Defaults to the list of variables collected in the graph
        under the key `GraphKeys.TRAINABLE_VARIABLES`.
      gate_gradients: How to gate the computation of gradients.  Can be
        `GATE_NONE`, `GATE_OP`, or `GATE_GRAPH`.
      aggregation_method: Specifies the method used to combine gradient terms.
        Valid values are defined in the class `AggregationMethod`.
      colocate_gradients_with_ops: If True, try colocating gradients with
        the corresponding op.
      grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.

    Returns:
      A list of (gradient, variable) pairs. Variable is always present, but
      gradient can be `None`.

    Raises:
      TypeError: If `var_list` contains anything else than `Variable` objects.
      ValueError: If some arguments are invalid.
    """
    if gate_gradients not in [Optimizer.GATE_NONE, Optimizer.GATE_OP,
                              Optimizer.GATE_GRAPH]:
      raise ValueError("gate_gradients must be one of: Optimizer.GATE_NONE, "
                       "Optimizer.GATE_OP, Optimizer.GATE_GRAPH.  Not %s" %
                       gate_gradients)
    self._assert_valid_dtypes([loss])
    if grad_loss is not None:
      self._assert_valid_dtypes([grad_loss])
    if var_list is None:
      var_list = (
          variables.trainable_variables() +
          ops.get_collection(ops.GraphKeys.TRAINABLE_RESOURCE_VARIABLES))
    processors = [_get_processor(v) for v in var_list]
    if not var_list:
      raise ValueError("No variables to optimize.")
    var_refs = [p.target() for p in processors]
    grads = gradients.gradients(
        loss, var_refs, grad_ys=grad_loss,
        gate_gradients=(gate_gradients == Optimizer.GATE_OP),
        aggregation_method=aggregation_method,
        colocate_gradients_with_ops=colocate_gradients_with_ops)
    if gate_gradients == Optimizer.GATE_GRAPH:
      grads = control_flow_ops.tuple(grads)
    grads_and_vars = list(zip(grads, var_list))
    self._assert_valid_dtypes([v for g, v in grads_and_vars if g is not None])
    return grads_and_vars
Developer: Jackhuang945 | Project: tensorflow | Lines: 60 | Source: optimizer.py
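
A hypothetical usage sketch of compute_gradients, assuming TF 1.x-style graph mode and a GradientDescentOptimizer; the loss and learning rate are illustrative:

 from tensorflow.python.client import session
 from tensorflow.python.framework import ops
 from tensorflow.python.ops import math_ops, variables
 from tensorflow.python.training import gradient_descent

 with ops.Graph().as_default():
   w = variables.Variable(2.0)
   loss = math_ops.square(w)                        # d(loss)/dw = 2*w
   opt = gradient_descent.GradientDescentOptimizer(0.1)
   grads_and_vars = opt.compute_gradients(loss)     # [(grad, w)]
   train_op = opt.apply_gradients(grads_and_vars)   # the second half of minimize()
   with session.Session() as sess:
     sess.run(variables.global_variables_initializer())
     print(sess.run([g for g, _ in grads_and_vars]))  # [4.0]
     sess.run(train_op)                               # w <- 2.0 - 0.1 * 4.0
     print(sess.run(w))                               # 1.6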


Example 20: test_tensor_array_grad

  def test_tensor_array_grad(self):
    inp = constant_op.constant(np.random.rand(3, 4, 2), dtype=dtypes.float32)
    ta = tensor_array_ops.TensorArray(dtypes.float32, size=3)
    ta = ta.unstack(inp)

    def loop_fn(i):

      def body(j, x):
        value = ta.gather([j])
        value = array_ops.gather(array_ops.reshape(value, [4, 2]), i)
        return j + 1, x + value

      _, out = control_flow_ops.while_loop(lambda j, _: j < 3, body,
                                           (0, array_ops.zeros([2])))
      out = math_ops.reduce_prod(out)
      return out, gradient_ops.gradients(out, inp)[0]

    pfor_out, pfor_out_grad = pfor_control_flow_ops.pfor(loop_fn, 4)
    # Note that tf.while_loop does not work in the setup above. So we manually
    # construct the equivalent computation of the above loops here.
    real_out = math_ops.reduce_sum(inp, axis=[0])
    real_out = math_ops.reduce_prod(real_out, axis=[1])
    # Note that gradients of real_out will accumulate the gradients across the
    # output value. Hence we do the same aggregation on pfor_out_grad.
    real_out_grad = gradient_ops.gradients(real_out, inp)[0]
    sum_pfor_out_grad = math_ops.reduce_sum(pfor_out_grad, axis=[0])

    with session.Session() as sess:
      v1, v2, v1_grad, v2_grad = sess.run(
          [pfor_out, real_out, sum_pfor_out_grad, real_out_grad])
      self.assertAllClose(v1, v2)
      self.assertAllClose(v1_grad, v2_grad)
Developer: aritratony | Project: tensorflow | Lines: 32 | Source: control_flow_ops_test.py



Note: The tensorflow.python.ops.gradients.gradients function examples in this article were compiled by 纯净天空 from source code and documentation hosted on platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their authors; copyright belongs to the original authors, and any use or redistribution should follow the corresponding project's license. Do not reproduce without permission.

