
Python ops.enable_eager_execution Function Code Examples


This article collects typical usage examples of the Python function tensorflow.python.framework.ops.enable_eager_execution. If you are wondering what enable_eager_execution does, how to call it, or what its usage looks like in practice, the curated examples below should help.



The following presents 10 code examples of the enable_eager_execution function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
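Before the examples, here is a minimal, hedged sketch of the typical call pattern (assuming a TensorFlow 1.x-style runtime where eager execution is off by default; the internal ops and context modules are the same ones used in the examples below):

    # Minimal sketch: enable eager execution once, at program start, before any
    # graphs, tensors, or devices have been created, then verify the mode.
    from tensorflow.python.eager import context
    from tensorflow.python.framework import ops

    ops.enable_eager_execution()        # switch the default runtime to eager mode
    assert context.executing_eagerly()  # True once eager execution is active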

Example 1: wrapper

 def wrapper(*args, **kwargs):
   try:
     return fn(*args, **kwargs)
   finally:
     # Reset the eager context after the wrapped call, then re-enable
     # eager execution on the fresh context.
     del context._context
     context._context = context.Context()
     ops.enable_eager_execution()
Developer: kylin9872 | Project: tensorflow | Lines: 7 | Source: config_test.py


Example 2: setUp

  def setUp(self):
    # Enabling eager execution the first time should take effect immediately.
    ops.enable_eager_execution()
    self.assertTrue(context.executing_eagerly())

    # Calling enable eager execution a second time should not cause an error.
    ops.enable_eager_execution()
    self.assertTrue(context.executing_eagerly())
Developer: adit-chandra | Project: tensorflow | Lines: 8 | Source: ops_enable_eager_test.py


Example 3: enable_v2_behavior

def enable_v2_behavior():
  """Enables TensorFlow 2.x behaviors.

  This function can be called at the beginning of the program (before `Tensors`,
  `Graphs`, or other structures have been created, and before devices have been
  initialized). It switches all global behaviors that are different between
  TensorFlow 1.x and 2.x to behave as intended for 2.x.

  This function is called in the main TensorFlow `__init__.py` file, so users
  should not need to call it, except during complex migrations.
  """
  tf2.enable()  # Switches TensorArrayV2 and control flow V2
  ops.enable_eager_execution()
  tensor_shape.enable_v2_tensorshape()  # Also switched by tf2
  variable_scope.enable_resource_variables()
Developer: adit-chandra | Project: tensorflow | Lines: 15 | Source: v2_compat.py
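
As the docstring above notes, enable_v2_behavior is normally invoked by TensorFlow's own `__init__.py`. For a migration script that needs to call it manually, a small hedged sketch might look like the following (assumption: a TensorFlow 1.15+ install where the public wrapper tf.compat.v1.enable_v2_behavior is available):

    # Migration sketch (assumption: tf.compat.v1.enable_v2_behavior is the public
    # wrapper for this function). Call it before creating any tensors or graphs.
    import tensorflow as tf

    tf.compat.v1.enable_v2_behavior()

    # 2.x semantics are now active: eager execution, resource variables,
    # and v2 TensorShape behavior.
    x = tf.constant([1.0, 2.0]) * 2.0
    print(x.numpy())  # eager tensors have concrete values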


Example 4: main

def main(_):
  if flags.FLAGS.enable_eager:
    ops.enable_eager_execution()
    logging.info('Eager execution enabled for MNIST Multi-Worker.')
  else:
    logging.info('Eager execution not enabled for MNIST Multi-Worker.')

  # Build the train and eval datasets from the MNIST data.
  train_ds, eval_ds = get_input_datasets()

  if flags.FLAGS.distribution_strategy == 'multi_worker_mirrored':
    # MultiWorkerMirroredStrategy for multi-worker distributed MNIST training.
    strategy = collective_strategy.CollectiveAllReduceStrategy()
  else:
    raise ValueError('Only `multi_worker_mirrored` is supported strategy '
                     'in Keras MNIST example at this time. Strategy passed '
                     'in is %s' % flags.FLAGS.distribution_strategy)

  # Create and compile the model under Distribution strategy scope.
  # `fit`, `evaluate` and `predict` will be distributed based on the strategy
  # model was compiled with.
  with strategy.scope():
    model = get_model()
    optimizer = rmsprop.RMSProp(learning_rate=0.001)
    model.compile(
        loss=keras.losses.categorical_crossentropy,
        optimizer=optimizer,
        metrics=['accuracy'])

  # Train the model with the train dataset.
  tensorboard_callback = keras.callbacks.TensorBoard(
      log_dir=flags.FLAGS.model_dir)
  model.fit(
      x=train_ds,
      epochs=20,
      steps_per_epoch=468,
      callbacks=[tensorboard_callback])

  # Evaluate the model with the eval dataset.
  score = model.evaluate(eval_ds, steps=10, verbose=0)
  logging.info('Test loss:{}'.format(score[0]))
  logging.info('Test accuracy:{}'.format(score[1]))
Developer: aritratony | Project: tensorflow | Lines: 42 | Source: mnist_multi_worker.py


Example 5: test_no_loss_in_call

    model.compile(
        loss='sparse_categorical_crossentropy',
        optimizer=RMSPropOptimizer(learning_rate=0.001))
    x = np.ones((100, 4), dtype=np.float32)
    np.random.seed(123)
    y = np.random.randint(0, 1, size=(100, 1))
    dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
    dataset = dataset.repeat(100)
    dataset = dataset.batch(10)
    iterator = dataset.make_one_shot_iterator()
    history = model.fit(iterator, epochs=1, steps_per_epoch=10)
    self.assertEqual(np.around(history.history['loss'][-1], decimals=4), 0.6173)

  def test_no_loss_in_call(self):

    class HasLoss(keras.layers.Layer):

      def call(self, x):
        self.add_loss(x)
        return x

    layer = HasLoss()
    with self.assertRaises(RuntimeError):
      layer(1.)

    with ops.Graph().as_default():
      layer(1.)

if __name__ == '__main__':
  ops.enable_eager_execution()
  test.main()
Developer: ThunderQi | Project: tensorflow | Lines: 30 | Source: training_eager_test.py


Example 6: train

    @function.defun
    def train():
      v = resource_variable_ops.ResourceVariable(1.0)
      grad = backprop.implicit_grad(loss)(v)
      optimizer.apply_gradients(grad)
      return v.read_value()

    train()

  def testOptimizerInDefunWithCapturedVariable(self):
    v = resource_variable_ops.ResourceVariable(1.0)
    def loss():
      return v**2

    optimizer = momentum.MomentumOptimizer(learning_rate=1.0, momentum=1.0)

    @function.defun
    def train():
      grad = backprop.implicit_grad(loss)()
      optimizer.apply_gradients(grad)

    train()
    self.assertEqual(v.numpy(), -1.0)


if __name__ == '__main__':
  ops.enable_eager_execution(
      config=config_pb2.ConfigProto(device_count={'CPU': 3}))
  test.main()
Developer: StephenOman | Project: tensorflow | Lines: 29 | Source: function_test.py


Example 7: testOptimization

  def testOptimization(self):
    dataset = dataset_ops.Dataset.range(10)
    dataset = dataset.apply(optimization.assert_next(["MemoryCacheImpl"]))
    dataset = dataset.skip(0)  # this should be optimized away
    dataset = dataset.cache()

    options = dataset_ops.Options()
    options.experimental_optimization.noop_elimination = True
    dataset = dataset.with_options(options)

    multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
        dataset, ["/cpu:1", "/cpu:2"])

    config = config_pb2.ConfigProto(device_count={"CPU": 3})
    with self.test_session(config=config):
      self.evaluate(multi_device_iterator.initializer)
      for i in range(0, 10, 2):
        elem_on_1, elem_on_2 = multi_device_iterator.get_next()
        self.assertEqual(i, self.evaluate(elem_on_1))
        self.assertEqual(i + 1, self.evaluate(elem_on_2))
      with self.assertRaises(errors.OutOfRangeError):
        elem_on_1, elem_on_2 = multi_device_iterator.get_next()
        self.evaluate(elem_on_1)
        self.evaluate(elem_on_2)


if __name__ == "__main__":
  ops.enable_eager_execution(
      config=config_pb2.ConfigProto(device_count={"CPU": 3, "GPU": 1}))
  test.main()
Developer: Wajih-O | Project: tensorflow | Lines: 30 | Source: multi_device_iterator_test.py


Example 8: main

def main(argv=None):  # pylint: disable=function-redefined
  _ops.enable_eager_execution()
  _test.main(argv)
Developer: JonathanRaiman | Project: tensorflow | Lines: 3 | Source: test.py


Example 9: MultiDeviceTest

class MultiDeviceTest(xla_test.XLATestCase):
  """Test running TPU computation on more than one core."""

  def testBasic(self):
    if not multiple_tpus():
      self.skipTest('MultiDeviceTest requires multiple TPU devices.')

    # Compute 10 on TPU core 0
    with ops.device('device:TPU:0'):
      two = constant_op.constant(2)
      five = constant_op.constant(5)
      ten = two * five
      self.assertAllEqual(10, ten)

    # Compute 6 on TPU core 1
    with ops.device('device:TPU:1'):
      two = constant_op.constant(2)
      three = constant_op.constant(3)
      six = two * three
      self.assertAllEqual(6, six)

    # Copy 10 and 6 to CPU and sum them
    self.assertAllEqual(16, ten + six)


if __name__ == '__main__':
  ops.enable_eager_execution(
      config=config_pb2.ConfigProto(log_device_placement=True))
  googletest.main()
Developer: JonathanRaiman | Project: tensorflow | Lines: 29 | Source: eager_test.py


Example 10: main

def main(argv=None):
  _ops.enable_eager_execution()
  _test.main(argv)
Developer: AbhinavJain13 | Project: tensorflow | Lines: 3 | Source: test.py



Note: The tensorflow.python.framework.ops.enable_eager_execution examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective developers; copyright remains with the original authors, and any distribution or use should follow the license of the corresponding project. Do not repost without permission.

