
Python backprop.implicit_grad Function Code Examples


This article collects typical usage examples of the Python function tensorflow.python.eager.backprop.implicit_grad. If you are unsure what implicit_grad does, how to call it, or what real-world usage looks like, the curated code examples below should help.



The 20 implicit_grad code examples below are sorted by popularity by default.
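Before the examples, here is a minimal usage sketch. It is my own illustration rather than one of the collected snippets, and it assumes a TensorFlow build that still ships the internal tensorflow.python.eager.backprop module, with eager execution enabled. implicit_grad(f) wraps f and returns a new function that, when called, evaluates f and returns a list of (gradient, variable) pairs for every trainable variable that f touched.

from tensorflow.python.eager import backprop
from tensorflow.python.ops import resource_variable_ops

v = resource_variable_ops.ResourceVariable(3.0, name='v')

def loss():
  # Every trainable ResourceVariable read inside the function is watched implicitly.
  return v * v

grad_fn = backprop.implicit_grad(loss)   # wraps `loss`; nothing is executed yet
grads_and_vars = grad_fn()               # evaluates `loss`, returns [(grad, var), ...]
print(grads_and_vars[0][0].numpy())      # 6.0, i.e. d(v*v)/dv evaluated at v = 3
print(grads_and_vars[0][1] is v)         # True: each gradient is paired with its variable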

Example 1: testDefunCanBeDifferentiatedTwice

  def testDefunCanBeDifferentiatedTwice(self):
    v = resource_variable_ops.ResourceVariable(1.0)

    @function.defun
    def f():
      return v * v

    self.assertAllEqual(backprop.implicit_grad(f)()[0][0], 2.0)
    # Ensure that v is watched again.
    self.assertAllEqual(backprop.implicit_grad(f)()[0][0], 2.0)
Contributor: StephenOman | Project: tensorflow | Lines: 10 | Source: function_test.py


Example 2: testDefunDifferentiable

  def testDefunDifferentiable(self):
    v = resource_variable_ops.ResourceVariable(1.0)

    @function.defun
    def f():
      return v * v

    self.assertAllEqual(backprop.implicit_grad(f)()[0][0], 2.0)
Contributor: StephenOman | Project: tensorflow | Lines: 8 | Source: function_test.py


Example 3: testGradientOfGatherWithDefun

  def testGradientOfGatherWithDefun(self):
    v = resource_variable_ops.ResourceVariable([0.0, 1.0, 2.0])

    def sum_gather():
      return math_ops.reduce_sum(array_ops.gather(v, [1, 2]))

    grad_fn = backprop.implicit_grad(sum_gather)
    gradient = grad_fn()
    defun_grad_fn = backprop.implicit_grad(function.defun(sum_gather))
    defun_gradient = defun_grad_fn()
    self.assertEqual(len(gradient), len(defun_gradient))

    gradient = gradient[0][0]
    defun_gradient = defun_gradient[0][0]
    self.assertAllEqual(gradient.values, defun_gradient.values)
    self.assertAllEqual(gradient.indices, defun_gradient.indices)
    self.assertAllEqual(gradient.dense_shape, defun_gradient.dense_shape)
Contributor: StephenOman | Project: tensorflow | Lines: 17 | Source: function_test.py


Example 4: testUnconnectedNone

  def testUnconnectedNone(self):
    v = resource_variable_ops.ResourceVariable(
        1.0, name='testUnconnectedNone')

    def f():
      v.read_value()
      return constant_op.constant(1.0)

    self.assertEqual(backprop.implicit_grad(f)()[0][0], None)
Contributor: Wajih-O | Project: tensorflow | Lines: 9 | Source: backprop_test.py


Example 5: _test_minimize_loss_graph

  def _test_minimize_loss_graph(self, d, soft_placement=False,
                                learning_rate=0.2):
    config = config_pb2.ConfigProto()
    config.allow_soft_placement = soft_placement
    config.gpu_options.per_process_gpu_memory_fraction = 0.3
    with context.graph_mode(), \
         ops.Graph().as_default(), \
         self.test_session(config=config) as sess, \
         d.scope():
      l = core.Dense(1, use_bias=False)

      def loss(x):
        # TODO(josh11b): What if this constant was instead a captured
        # value?  Would it need to be a value that has been passed
        # through d.broadcast()?
        y = array_ops.reshape(l(x), []) - constant_op.constant(1.)
        return y * y

      grad_fn = backprop.implicit_grad(loss)

      def update(v, g):
        return v.assign_sub(learning_rate * g)

      one = d.broadcast(constant_op.constant([[1.]]))

      def step():
        """Perform one optimization step."""
        # Run forward & backward to get gradients, variables list.
        g_v = d.call_for_each_tower(grad_fn, one)

        # Update the variables using the gradients and the update() function.
        before_list = []
        after_list = []
        for g, v in g_v:
          fetched = d.read_var(v)
          before_list.append(fetched)
          with ops.control_dependencies([fetched]):
            g = d.reduce(
                variable_scope.VariableAggregation.SUM, g, destinations=v)
            with ops.control_dependencies(d.update(
                v, update, g, grouped=False)):
              after_list.append(d.read_var(v))
        return before_list, after_list

      before_out, after_out = step()
      variables.global_variables_initializer().run()
      for i in range(10):
        b, a = sess.run((before_out, after_out))
        if i == 0:
          before, = b
        after, = a

      error_before = abs(before - 1)
      error_after = abs(after - 1)
      # Error should go down
      self.assertLess(error_after, error_before)
Contributor: ThunderQi | Project: tensorflow | Lines: 56 | Source: strategy_test_lib.py


Example 6: testImplicitGradWithResourceVariable

  def testImplicitGradWithResourceVariable(self):
    x = resource_variable_ops.ResourceVariable(initial_value=tensor.Tensor(1.0),
                                               name='x')
    def fn():
      tape.watch(x.handle)
      b = tensor.Tensor(2.0)
      c = math_ops.add(x.value(), b)
      return math_ops.add(c, tensor.Tensor(3.0))
    grad = backprop.implicit_grad(fn)()[0][1]
    self.assertEqual(grad.numpy(), 1.0)
Contributor: piyushjaiswal98 | Project: tensorflow | Lines: 10 | Source: backprop_test.py


Example 7: testGradients

  def testGradients(self):
    @graph_callable.graph_callable([])
    def my_function():
      v = variable_scope.get_variable(
          "v", initializer=init_ops.constant_initializer(3.), shape=())
      return v * v

    grad_fn = backprop.implicit_grad(my_function)
    grads_and_vars = list(zip(*grad_fn()))
    self.assertAllEqual(6., grads_and_vars[0][0])
Contributor: AbhinavJain13 | Project: tensorflow | Lines: 10 | Source: graph_callable_test.py


Example 8: testVariableGradient

  def testVariableGradient(self):
    with self.test_scope():
      v0 = resource_variable_ops.ResourceVariable(1.0)

      def f():
        x = v0 * v0
        return x

      grads = backprop.implicit_grad(f)()
    self.assertEqual(2., grads[0][0].numpy())
Contributor: JonathanRaiman | Project: tensorflow | Lines: 10 | Source: eager_test.py


Example 9: _test_minimize_loss_graph

  def _test_minimize_loss_graph(self,
                                d,
                                soft_placement=False,
                                learning_rate=0.2):
    config = config_pb2.ConfigProto()
    config.allow_soft_placement = soft_placement
    config.gpu_options.per_process_gpu_memory_fraction = 0.3
    with context.graph_mode(), \
         ops.Graph().as_default(), \
         self.cached_session(config=config) as sess, \
         d.scope():
      l = core.Dense(1, use_bias=False)

      def loss(x):
        y = array_ops.reshape(l(x), []) - constant_op.constant(1.)
        return y * y

      grad_fn = backprop.implicit_grad(loss)

      def update(v, g):
        return v.assign_sub(learning_rate * g)

      one = constant_op.constant([[1.]])

      def step():
        """Perform one optimization step."""
        # Run forward & backward to get gradients, variables list.
        g_v = d.extended.call_for_each_replica(grad_fn, args=(one,))

        # Update the variables using the gradients and the update() function.
        before_list = []
        after_list = []
        for g, v in g_v:
          fetched = d.extended.read_var(v)
          before_list.append(fetched)
          with ops.control_dependencies([fetched]):
            g = d.extended.reduce_to(
                reduce_util.ReduceOp.SUM, g, destinations=v)
            with ops.control_dependencies(
                d.extended.update(v, update, args=(g,), group=False)):
              after_list.append(d.extended.read_var(v))
        return before_list, after_list

      before_out, after_out = step()
      variables.global_variables_initializer().run()
      for i in range(10):
        b, a = sess.run((before_out, after_out))
        if i == 0:
          before, = b
        after, = a

      error_before = abs(before - 1)
      error_after = abs(after - 1)
      # Error should go down
      self.assertLess(error_after, error_before)
Contributor: adit-chandra | Project: tensorflow | Lines: 55 | Source: strategy_test_lib.py


Example 10: testGPUImplicitGrad

  def testGPUImplicitGrad(self):
    with context.device('gpu:0'):
      v = resource_variable_ops.ResourceVariable(
          constant_op.constant(1.0), name='v')

    def f():
      with context.device('gpu:0'):
        return v.read_value()

    self.assertEqual(
        backprop.implicit_grad(f)()[0][0].cpu().numpy(), 1.0)
Contributor: adit-chandra | Project: tensorflow | Lines: 11 | Source: backprop_test.py


Example 11: testReturningNonTensorRaisesError

  def testReturningNonTensorRaisesError(self):
    optimizer = momentum.MomentumOptimizer(learning_rate=1.0, momentum=1.0)
    optimizer.apply_gradients = function.defun(optimizer.apply_gradients)
    v = resource_variable_ops.ResourceVariable(1.0)
    grad = backprop.implicit_grad(lambda v: v**2)(v)

    with self.assertRaisesRegexp(TypeError,
                                 '.*must return zero or more Tensors.*'):
      # TODO(akshayka): We might want to allow defun-ing Python functions
      # that return operations (and just execute the op instead of running it).
      optimizer.apply_gradients(grad)
Contributor: Wajih-O | Project: tensorflow | Lines: 11 | Source: auto_control_deps_test.py


Example 12: testEarlyGradAggregation

  def testEarlyGradAggregation(self):
    # Needs to be a list so mutations by the callback affect this function.
    add_n = []
    def callback(op_type, unused_1, unused_2, unused_3, unused_4):
      if compat.as_bytes(op_type) == compat.as_bytes('AddN'):
        add_n.append(1)
    context.context().add_post_execution_callback(callback)

    v = resource_variable_ops.ResourceVariable(constant_op.constant(2.0),
                                               name='v')
    def fn():
      outputs = []
      for _ in range(20):
        outputs.append(v * constant_op.constant(2.0))
      return math_ops.add_n(outputs)

    # By default the aggregation count is 2.
    _ = backprop.implicit_grad(fn)()[0][1]
    self.assertEqual(len(add_n), 2)
    del add_n[:]

    # Reduce the aggregation limit, cause the backprop to do some
    # early aggregation.
    # pylint: disable=protected-access
    old_cnt = imperative_grad._MIN_AGGREGATE_COUNT
    old_bytes = imperative_grad._MIN_AGGREGATE_BYTES
    imperative_grad._MIN_AGGREGATE_COUNT = 10
    imperative_grad._MIN_AGGREGATE_BYTES = 1
    _ = backprop.implicit_grad(fn)()
    self.assertEqual(len(add_n), 6)
    del add_n[:]

    # Aggregation is also limited by the memory.
    imperative_grad._MIN_AGGREGATE_BYTES = 10000
    _ = backprop.implicit_grad(fn)()
    self.assertEqual(len(add_n), 2)

    imperative_grad._MIN_AGGREGATE_COUNT = old_cnt
    imperative_grad._MIN_AGGREGATE_BYTES = old_bytes
    # pylint: enable=protected-access
    context.context().clear_post_execution_callbacks()
Contributor: DjangoPeng | Project: tensorflow | Lines: 41 | Source: backprop_test.py


Example 13: testImplicitGradWithResourceVariable

  def testImplicitGradWithResourceVariable(self):
    x = resource_variable_ops.ResourceVariable(
        initial_value=constant_op.constant(1.0), name='x')

    def fn():
      b = constant_op.constant(2.0)
      c = math_ops.add(x.value(), b)
      return math_ops.add(c, constant_op.constant(3.0))

    grads_and_vars = backprop.implicit_grad(fn)()
    self.assertAllEqual(grads_and_vars[0][0], 1.0)
    self.assertAllEqual(id(grads_and_vars[0][1]), id(x))
Contributor: Wajih-O | Project: tensorflow | Lines: 12 | Source: backprop_test.py


Example 14: testMultiValueConvertToTensor

  def testMultiValueConvertToTensor(self):
    x = resource_variable_ops.ResourceVariable(
        initial_value=array_ops.constant([1.0]), name='x')

    def fn():
      a = math_ops.add(x.value(), 1.0)
      # Make sure convert_to_tensor works correctly with list of TensorNodes.
      b = array_ops.stack([a, a], axis=0)
      return math_ops.reduce_mean(b)

    grad = backprop.implicit_grad(fn)()[0][0]
    self.assertAllEqual([1.0], grad)
Contributor: Wajih-O | Project: tensorflow | Lines: 12 | Source: backprop_test.py


Example 15: testDifferentShapesEager

  def testDifferentShapesEager(self):
    # Checks that kernel caching does not cause sharing of temporary storage
    # across different input shapes when executing eagerly.
    with context.eager_mode():
      with ops.device("gpu:0"):
        first_output, _ = cudnn_rnn.CudnnGRU(1, 100)(
            array_ops.zeros([28, 100, 28]))
        second_output, _ = cudnn_rnn.CudnnGRU(1, 100)(
            array_ops.zeros([28, 100, 100]))
        self.assertAllEqual([28, 100, 100], first_output.shape)
        self.assertAllEqual([28, 100, 100], second_output.shape)

        def _LossFunc():
          first_output, _ = cudnn_rnn.CudnnGRU(1, 100)(
              array_ops.zeros([28, 100, 28]))
          second_output, _ = cudnn_rnn.CudnnGRU(1, 100)(
              array_ops.zeros([28, 100, 100]))
          return (math_ops.reduce_sum(first_output) +
                  math_ops.reduce_sum(second_output))

        backprop.implicit_grad(_LossFunc)()
Contributor: AndrewTwinz | Project: tensorflow | Lines: 21 | Source: cudnn_rnn_test.py


Example 16: testGradientTensorConversionWithDefun

  def testGradientTensorConversionWithDefun(self):
    three = resource_variable_ops.ResourceVariable(3.0, name='v')

    @def_function.function
    def f(x):
      return math_ops.add(x, three)

    def g(x):
      return f(x)

    g = backprop.implicit_grad(g)(constant_op.constant(1.0))[0][0]
    self.assertAllEqual(g, 1.0)
Contributor: JonathanRaiman | Project: tensorflow | Lines: 12 | Source: function_gradients_test.py


Example 17: testImplicitGradOrdering

  def testImplicitGradOrdering(self):
    v0 = resource_variable_ops.ResourceVariable(1.0)
    v1 = resource_variable_ops.ResourceVariable(2.0)

    def f():
      x = v1 * v1
      y = v0 * v0
      return x + y

    grads = backprop.implicit_grad(f)()
    ordered_variables = [x[1] for x in grads]
    self.assertTrue(ordered_variables[0] is v0)
    self.assertTrue(ordered_variables[1] is v1)
Contributor: Wajih-O | Project: tensorflow | Lines: 13 | Source: backprop_test.py


Example 18: testGradientTensorConversionWithDefun

  def testGradientTensorConversionWithDefun(self):
    three = tensor.Tensor(3.0)

    @function.defun
    def f(x):
      return math_ops.add(x, three)

    def g(x):
      tape.watch(three)
      return f(x)

    g = backprop.implicit_grad(g)(tensor.Tensor(1.0))[0][1]
    self.assertEqual(g.numpy(), 1.0)
Contributor: chdinh | Project: tensorflow | Lines: 13 | Source: function_test.py


Example 19: step_fn

      def step_fn(ctx, *inputs):
        """Function to run one iteration with one input."""
        gradients_fn = backprop.implicit_grad(self._loss_fn)
        gradients_fn = optimizer_lib.get_filtered_grad_fn(gradients_fn)

        grads_and_vars = self.distribution.call_for_each_replica(
            gradients_fn, args=(ctx,) + inputs)
        # If threads use layers, then we need to run the first step
        # sequentially, so that layers.build() is not executed in parallel.
        # Otherwise, multiple sets of mirrored variables are going to be
        # created.
        return self._optimizer._distributed_apply(  # pylint: disable=protected-access
            self.distribution, grads_and_vars)
Contributor: abhinav-upadhyay | Project: tensorflow | Lines: 13 | Source: step_fn.py


Example 20: step

  def step(self, inputs):
    with self._distribution.scope():
      gradients_fn = backprop.implicit_grad(self._loss_fn)
      gradients_fn = optimizer_lib.get_filtered_grad_fn(gradients_fn)

      grads_and_vars = self.distribution.call_for_each_tower(
          gradients_fn, inputs, run_concurrently=self._is_run_concurrently)
      # If threads use layers, then we need to run the first step sequentially,
      # so that layers.build() is not executed in parallel.  Otherwise, multiple
      # sets of mirrored variables are going to be created.
      self._is_run_concurrently = True
      return self._optimizer._distributed_apply(  # pylint: disable=protected-access
          self.distribution, grads_and_vars)
Contributor: BhaskarNallani | Project: tensorflow | Lines: 13 | Source: step_fn.py



Note: The tensorflow.python.eager.backprop.implicit_grad examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are selected from open-source projects; copyright remains with the original authors, and redistribution or use should follow each project's license. Do not republish without permission.

