Python tensorflow.executing_eagerly Function Code Examples


This article collects typical usage examples of the Python function tensorflow.executing_eagerly. If you have been wondering how executing_eagerly is used in practice, or looking for concrete examples of it, the curated code samples below should help.



Twenty code examples of executing_eagerly are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help our system recommend better Python code examples.
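Before turning to the examples, here is a minimal sketch of what tf.executing_eagerly reports, assuming TensorFlow 1.x where eager execution is opt-in (this snippet is an illustration, not taken from the projects below):

import tensorflow as tf

tf.enable_eager_execution()  # TF 1.x: call once at startup, before building any graph

print(tf.executing_eagerly())  # True: operations now run immediately

# Entering an explicit graph context is not eager, even after the call above.
with tf.Graph().as_default():
  print(tf.executing_eagerly())  # False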

Example 1: _value_and_gradients

def _value_and_gradients(fn, fn_arg_list, result=None, grads=None, name=None):
  """Helper to `maybe_call_fn_and_grads`."""
  with tf.name_scope(name, 'value_and_gradients', [fn_arg_list, result, grads]):
    def _convert_to_tensor(x, name):
      ctt = lambda x_: x_ if x_ is None else tf.convert_to_tensor(x_, name=name)
      return [ctt(x_) for x_ in x] if is_list_like(x) else ctt(x)

    fn_arg_list = (list(fn_arg_list) if is_list_like(fn_arg_list)
                   else [fn_arg_list])
    fn_arg_list = _convert_to_tensor(fn_arg_list, 'fn_arg')

    if result is None:
      result = fn(*fn_arg_list)
      if grads is None and tf.executing_eagerly():
        # Ensure we disable bijector caching in eager mode.
        # TODO(b/72831017): Remove this once bijector caching is fixed for
        # eager mode.
        fn_arg_list = [0 + x for x in fn_arg_list]

    result = _convert_to_tensor(result, 'fn_result')

    if grads is not None:
      grads = _convert_to_tensor(grads, 'fn_grad')
      return result, grads

    if tf.executing_eagerly():
      if is_list_like(result) and len(result) == len(fn_arg_list):
        # Compute the block diagonal of Jacobian.
        # TODO(b/79158574): Guard this calculation by an arg which explicitly
        # requests block diagonal Jacobian calculation.
        def make_fn_slice(i):
          """Needed to prevent `cell-var-from-loop` pylint warning."""
          return lambda *args: fn(*args)[i]
        grads = [
            tfe.gradients_function(make_fn_slice(i))(*fn_arg_list)[i]
            for i in range(len(result))
        ]
      else:
        grads = tfe.gradients_function(fn)(*fn_arg_list)
    else:
      if is_list_like(result) and len(result) == len(fn_arg_list):
        # Compute the block diagonal of Jacobian.
        # TODO(b/79158574): Guard this calculation by an arg which explicitly
        # requests block diagonal Jacobian calculation.
        grads = [tf.gradients(result[i], fn_arg_list[i])[0]
                 for i in range(len(result))]
      else:
        grads = tf.gradients(result, fn_arg_list)

    return result, grads
Developer: asudomoeva, Project: probability, Lines: 50, Source: util.py


Example 2: testEventShape

  def testEventShape(self):
    # Shape is always known for reshaping in eager mode, so we skip these tests.
    if tf.executing_eagerly():
      return

    event_shape_in, event_shape_out = self.build_shapes([2, 3], [6])
    bijector = tfb.Reshape(
        event_shape_out=event_shape_out,
        event_shape_in=event_shape_in,
        validate_args=True)

    self.assertEqual(
        bijector.forward_event_shape(tf.TensorShape([4, 2, 3])).as_list(),
        [4, None])
    self.assertEqual(
        bijector.forward_event_shape(tf.TensorShape([None, 2, 3])).as_list(),
        [None, None])
    self.assertEqual(
        bijector.inverse_event_shape(tf.TensorShape([4, 6])).as_list(),
        [4, None, None])
    self.assertEqual(
        bijector.inverse_event_shape(tf.TensorShape([None, 6])).as_list(),
        [None, None, None])
    # If the input shape is totally unknown, there's nothing we can do!
    self.assertIsNone(
        bijector.forward_event_shape(tf.TensorShape(None)).ndims)
Developer: asudomoeva, Project: probability, Lines: 26, Source: reshape_test.py


Example 3: testSampleWithSameSeed

  def testSampleWithSameSeed(self):
    if tf.executing_eagerly():
      return
    scale = make_pd(1., 2)
    df = 4

    chol_w = tfd.Wishart(
        df, scale_tril=chol(scale), input_output_cholesky=False)

    x = self.evaluate(chol_w.sample(1, seed=42))
    chol_x = [chol(x[0])]

    full_w = tfd.Wishart(df, scale, input_output_cholesky=False)
    self.assertAllClose(x, self.evaluate(full_w.sample(1, seed=42)))

    chol_w_chol = tfd.Wishart(
        df, scale_tril=chol(scale), input_output_cholesky=True)
    self.assertAllClose(chol_x, self.evaluate(chol_w_chol.sample(1, seed=42)))
    eigen_values = tf.matrix_diag_part(chol_w_chol.sample(1000, seed=42))
    np.testing.assert_array_less(0., self.evaluate(eigen_values))

    full_w_chol = tfd.Wishart(df, scale=scale, input_output_cholesky=True)
    self.assertAllClose(chol_x, self.evaluate(full_w_chol.sample(1, seed=42)))
    eigen_values = tf.matrix_diag_part(full_w_chol.sample(1000, seed=42))
    np.testing.assert_array_less(0., self.evaluate(eigen_values))
Developer: asudomoeva, Project: probability, Lines: 25, Source: wishart_test.py


Example 4: _call

  def _call(self, *args, **kwargs):
    """Entry point when a module is called to connect it to the graph.

    This is the entry point when users connect a Module into the Graph. The
    underlying _build method will have been wrapped in a Template by the
    constructor, and we call this template with the provided inputs here.

    Note we use `_call` instead of `__call__` to allow instance level monkey
    patching (see `defun`).

    Args:
      *args: Arguments for underlying _build method.
      **kwargs: Keyword arguments for underlying _build method.

    Returns:
      The result of the underlying _build method.
    """
    self._check_init_called()
    self._check_same_graph()
    with self._capture_variables():
      outputs, subgraph_name_scope = self._template(*args, **kwargs)
    self._is_connected = True
    if not tf.executing_eagerly():
      # In eager mode the module is called a lot more frequently than in graph
      # mode (for each training step) and so we don't keep track of connected
      # subgraphs (since there will be orders of magnitude more of them).
      self._add_connected_subgraph(self._build, outputs, subgraph_name_scope,
                                   *args, **kwargs)
    return outputs
Developer: ccchang0111, Project: sonnet, Lines: 29, Source: base.py


Example 5: testRegularizers

  def testRegularizers(self, trainable, state_size):
    batch_size = 6

    # Set the attribute on the class since we can't set properties of
    # abstract classes.
    snt.RNNCore.state_size = state_size
    flat_state_size = nest.flatten(state_size)
    core = snt.RNNCore(name="dummy_core")
    flat_regularizer = ([tf.contrib.layers.l1_regularizer(scale=0.5)] *
                        len(flat_state_size))
    trainable_regularizers = nest.pack_sequence_as(
        structure=state_size, flat_sequence=flat_regularizer)

    core.initial_state(batch_size, dtype=tf.float32, trainable=trainable,
                       trainable_regularizers=trainable_regularizers)

    graph_regularizers = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    if not trainable:
      self.assertFalse(graph_regularizers)
    else:
      self.assertEqual(len(graph_regularizers), len(flat_state_size))
      if not tf.executing_eagerly():
        for i in range(len(flat_state_size)):
          self.assertRegexpMatches(
              graph_regularizers[i].name, ".*l1_regularizer.*")
Developer: ccchang0111, Project: sonnet, Lines: 25, Source: rnn_core_test.py


Example 6: testInitialStateNames

  def testInitialStateNames(self):
    if tf.executing_eagerly():
      return self.skipTest("Tensor.name is meaningless in eager mode.")

    hidden_size_a = 3
    hidden_size_b = 4
    batch_size = 5
    deep_rnn = snt.DeepRNN(
        [snt.LSTM(hidden_size_a, name="a"), snt.LSTM(hidden_size_b, name="b")])
    deep_rnn_state = deep_rnn.initial_state(batch_size, trainable=True)
    self.assertEqual(
        deep_rnn_state[0][0].name,
        "deep_rnn_initial_state/a_initial_state/state_hidden_tiled:0")
    self.assertEqual(
        deep_rnn_state[0][1].name,
        "deep_rnn_initial_state/a_initial_state/state_cell_tiled:0")
    self.assertEqual(
        deep_rnn_state[1][0].name,
        "deep_rnn_initial_state/b_initial_state/state_hidden_tiled:0")
    self.assertEqual(
        deep_rnn_state[1][1].name,
        "deep_rnn_initial_state/b_initial_state/state_cell_tiled:0")

    other_start_state = deep_rnn.initial_state(
        batch_size, trainable=True, name="blah")
    self.assertEqual(other_start_state[0][0].name,
                     "blah/a_initial_state/state_hidden_tiled:0")
    self.assertEqual(other_start_state[0][1].name,
                     "blah/a_initial_state/state_cell_tiled:0")
    self.assertEqual(other_start_state[1][0].name,
                     "blah/b_initial_state/state_hidden_tiled:0")
    self.assertEqual(other_start_state[1][1].name,
                     "blah/b_initial_state/state_cell_tiled:0")
Developer: ccchang0111, Project: sonnet, Lines: 33, Source: basic_rnn_test.py


Example 7: testActivateBiasFlags

  def testActivateBiasFlags(self, activate_final, use_bias, use_dropout):
    mlp = snt.nets.MLP(name=self.module_name,
                       output_sizes=self.output_sizes,
                       activate_final=activate_final,
                       use_bias=use_bias,
                       use_dropout=use_dropout)

    inputs = tf.random_normal(
        dtype=tf.float32, shape=[self.batch_size, self.input_size])
    net = mlp(inputs)

    if not tf.executing_eagerly():
      if activate_final:
        self.assertEqual(net.op.type, "Relu")
      elif use_bias:
        self.assertEqual(net.op.type, "Add")
      else:
        self.assertEqual(net.op.type, "MatMul")

    variables = mlp.get_variables()

    if use_bias:
      self.assertEqual(len(variables), len(self.output_sizes) * 2)
    else:
      self.assertEqual(len(variables), len(self.output_sizes))
Developer: ccchang0111, Project: sonnet, Lines: 25, Source: mlp_test.py


Example 8: test_copy_layers

  def test_copy_layers(self):
    """Test copying layers."""
    tg = dc.models.TensorGraph()
    features = Feature(shape=(None, 10))
    dense = Dense(
        10, in_layers=features, biases_initializer=tf.random_normal_initializer)
    constant = Constant(10.0)
    output = dense + constant
    tg.add_output(output)
    tg.set_loss(output)
    tg.fit_generator([])
    replacements = {constant: Constant(20.0)}
    copy = output.copy(replacements, tg)
    assert isinstance(copy, Add)
    assert isinstance(copy.in_layers[0], Dense)
    assert isinstance(copy.in_layers[0].in_layers[0], Feature)
    assert copy.in_layers[1] == replacements[constant]
    variables = tg.get_layer_variables(dense)
    with tg._get_tf("Graph").as_default():
      if tf.executing_eagerly():
        values = [v.numpy() for v in variables]
      else:
        values = tg.session.run(variables)
    for v1, v2 in zip(values, copy.in_layers[0].variable_values):
      assert np.array_equal(v1, v2)
Developer: ktaneishi, Project: deepchem, Lines: 25, Source: test_tensor_graph.py


Example 9: _set_seed

def _set_seed(seed):
  """Helper which uses graph seed if using TFE."""
  # TODO(b/68017812): Deprecate once TFE supports seed.
  if tf.executing_eagerly():
    tf.set_random_seed(seed)
    return None
  return seed
Developer: asudomoeva, Project: probability, Lines: 7, Source: replica_exchange_mc_test.py
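For context, a hedged usage sketch of the helper above (dist is a hypothetical distribution object; in graph mode the op-level seed passes through unchanged, while in eager mode the graph-level seed is set and None is returned):

seed = _set_seed(42)
samples = dist.sample(10, seed=seed)  # reproducible in both modes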


Example 10: testCustomGetter

  def testCustomGetter(self):
    custom_getter = snt.custom_getters.Context(snt.custom_getters.stop_gradient)
    module = snt.nets.ConvNet2D(output_channels=self.output_channels,
                                kernel_shapes=self.kernel_shapes,
                                rates=self.rates,
                                strides=self.strides,
                                paddings=self.paddings,
                                custom_getter=custom_getter)

    input_shape = [10, 100, 100, 3]
    input_to_net = tf.random_normal(dtype=tf.float32, shape=input_shape)

    if tf.executing_eagerly():
      with tf.GradientTape() as tape0:
        out0 = module(input_to_net)
      with tf.GradientTape() as tape1:
        with custom_getter:
          out1 = module(input_to_net)
      all_vars = tf.trainable_variables()
      out0_grads = tape0.gradient(out0, all_vars)
      out1_grads = tape1.gradient(out1, all_vars)

    else:
      out0 = module(input_to_net)
      with custom_getter:
        out1 = module(input_to_net)
      all_vars = tf.trainable_variables()
      out0_grads = tf.gradients(out0, all_vars)
      out1_grads = tf.gradients(out1, all_vars)

    for grad in out0_grads:
      self.assertNotEqual(None, grad)
    self.assertEqual([None] * len(out1_grads), out1_grads)
Developer: ccchang0111, Project: sonnet, Lines: 33, Source: convnet_test.py


Example 11: testReprWorksCorrectlyMultivariate

  def testReprWorksCorrectlyMultivariate(self):
    mvn_static = tfd.MultivariateNormalDiag(
        loc=np.zeros([2, 2]), name="MVN")
    self.assertEqual(
        repr(mvn_static),
        "<tfp.distributions.MultivariateNormalDiag"
        " 'MVN/'"
        " batch_shape=(2,)"
        " event_shape=(2,)"
        " dtype=float64>")

    # There's no notion of partially known shapes in eager mode, so exit
    # early.
    if tf.executing_eagerly():
      return

    mvn_dynamic = tfd.MultivariateNormalDiag(
        loc=tf.placeholder_with_default(
            input=np.ones((3, 3), dtype=np.float32), shape=[None, 3]),
        name="MVN2")
    self.assertEqual(
        repr(mvn_dynamic),
        "<tfp.distributions.MultivariateNormalDiag"
        " 'MVN2/'"
        " batch_shape=(?,)"  # Partially known.
        " event_shape=(3,)"
        " dtype=float32>")
Developer: asudomoeva, Project: probability, Lines: 27, Source: distribution_test.py


Example 12: testDataFormat

  def testDataFormat(self, module, data_format):
    net = module(
        output_channels=self.output_channels,
        kernel_shapes=self.kernel_shapes,
        strides=self.strides,
        paddings=self.paddings,
        data_format=data_format)

    input_height, input_width, input_channels = 100, 100, 3
    batch_size = 10
    final_channel = self.output_channels[-1]
    if data_format == "NHWC":
      input_shape = [batch_size, input_height, input_width, input_channels]
      expected_output_shape = [
          batch_size, input_height, input_width, final_channel
      ]

    else:
      input_shape = [batch_size, input_channels, input_height, input_width]
      expected_output_shape = [
          batch_size, final_channel, input_height, input_width
      ]
    input_to_net = tf.random_normal(dtype=tf.float32, shape=input_shape)

    if tf.executing_eagerly() and data_format == "NCHW":
      expected_exception = (
          tf.errors.UnimplementedError
          if module == snt.nets.ConvNet2D else tf.errors.InvalidArgumentError)
      with self.assertRaisesRegexp(expected_exception, "only supports NHWC"):
        output = net(input_to_net)

    else:
      output = net(input_to_net)
      self.assertEqual(output.get_shape().as_list(), expected_output_shape)
Developer: ccchang0111, Project: sonnet, Lines: 34, Source: convnet_test.py


Example 13: testNormalizations

  def testNormalizations(self, conv_ctor, norm_ctor, norm_kwargs):
    if tf.executing_eagerly():
      self.skipTest("Cannot test normalization correctness in Eager.")
    module = conv_ctor(
        output_channels=[16, 16],
        kernel_shapes=(3,),
        strides=(1,),
        paddings=("SAME",),
        normalization_ctor=norm_ctor,
        normalization_kwargs=norm_kwargs,
        normalize_final=True,
        activate_final=False)  # No final activation, that would un-normalize.
    inputs = tf.random_uniform([16, 48, 64, 3])
    output = module(inputs)
    with tf.train.SingularMonitoredSession() as session:
      output_np = session.run(output)

    # Convert the output into something where all the dimensions that should be
    # jointly normalized are combined to be on axis=1.
    if "axis" in norm_kwargs and norm_kwargs["axis"] == [1, 2]:
      # Check for instance normalization - combine spatial dimensions.
      output_np = np.reshape(output_np, [16, -1, 3])
    else:
      # Check for layer normalization - combine all non-batch dimensions.
      output_np = np.reshape(output_np, [16, -1])
    mean = np.mean(output_np, axis=1)
    std_dev = np.std(output_np, axis=1)
    # High tolerance - summing across big images, this normalization is fairly
    # approximate.
    self.assertAllClose(mean, np.zeros_like(mean), atol=2e-2)
    self.assertAllClose(std_dev, np.ones_like(std_dev), atol=2e-2)
Developer: ccchang0111, Project: sonnet, Lines: 31, Source: convnet_test.py


Example 14: _check_same_graph

  def _check_same_graph(self):
    """Checks that the module is not being connect to multiple Graphs.

    An instance of a Sonnet module 'owns' the variables it contains, and permits
    seamless variable sharing. As such, connecting a single module instance to
    multiple Graphs is not possible - this function will raise an error should
    that occur.

    Raises:
      DifferentGraphError: if the module is connected to a different Graph than
        it was previously used in.
    """
    with ops.init_scope():
      # We need `init_scope` in case we're running inside a defun. In that
      # case, what we want is information about where the function will be
      # called, not where the function is being built.
      current_graph = tf.get_default_graph()
      will_call_in_eager_context = tf.executing_eagerly()

    if self._graph is None:
      self._graph = current_graph
      self._set_module_info()

    if not will_call_in_eager_context:
      # Same graph checks only make sense when calling from graph mode (in eager
      # mode there is a single process level context where all modules are
      # created).
      if self._graph != current_graph:
        raise DifferentGraphError("Cannot connect module to multiple Graphs.")
Developer: ccchang0111, Project: sonnet, Lines: 29, Source: base.py


Example 15: testRegularizersInRegularizationLosses

  def testRegularizersInRegularizationLosses(self, transpose, use_bias):
    if transpose:
      module = functools.partial(snt.nets.ConvNet2DTranspose,
                                 output_shapes=[[100, 100]])
    else:
      module = snt.nets.ConvNet2D
    if use_bias:
      regularizers = {"w": tf.contrib.layers.l1_regularizer(scale=0.5),
                      "b": tf.contrib.layers.l2_regularizer(scale=0.5)}
    else:
      regularizers = {"w": tf.contrib.layers.l1_regularizer(scale=0.5)}

    model = module(output_channels=self.output_channels,
                   kernel_shapes=self.kernel_shapes,
                   strides=self.strides,
                   paddings=self.paddings,
                   use_bias=use_bias,
                   regularizers=regularizers)

    input_to_net = tf.random_normal(dtype=tf.float32, shape=(1, 100, 100, 3))
    model(input_to_net)

    regularizers = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    expected_num_regularizers = 3 * (2 if use_bias else 1)
    self.assertLen(regularizers, expected_num_regularizers)
    if not tf.executing_eagerly():
      self.assertRegexpMatches(regularizers[0].name, ".*l1_regularizer.*")
      if use_bias:
        self.assertRegexpMatches(regularizers[1].name, ".*l2_regularizer.*")
Developer: ccchang0111, Project: sonnet, Lines: 29, Source: convnet_test.py


Example 16: test_gradients_and_propagation_of_nan_in_x

  def test_gradients_and_propagation_of_nan_in_x(self):
    # If x contains NaN, this should propagate through to y, and not mess up the
    # gradients associated with finite members of x.
    # In fact, even NaN members of x result in finite (zero) gradients.

    x_min = 0.
    x_max = 1.
    dtype = np.float32
    num_pts = 4

    implied_x_ref = np.linspace(x_min, x_max, num_pts, dtype=dtype)
    y_ref = 2 * implied_x_ref

    x_ = np.array([0., 0.1, np.nan, 0.4, 1.]).astype(dtype)
    y_expected = 2 * x_

    x = tf.constant(x_)

    with self.test_session():
      y = tfp.math.interp_regular_1d_grid(x, x_min, x_max, y_ref)
      y_ = self.evaluate(y)
      self.assertAllClose(y_, y_expected, atol=0, rtol=1e-6)
      if not tf.executing_eagerly():
        dy_dx_ = tf.gradients(y, x)[0].eval()
        self.assertAllClose([2., 2., 0., 2., 2.], dy_dx_)
Developer: asudomoeva, Project: probability, Lines: 25, Source: interpolation_test.py


Example 17: compute_gradients

  def compute_gradients(self, f, args, grad_ys=None):
    """Computes gradients using tf.GradientTape or tf.gradients.

    Arguments:
      f: Function to be differentiated.
      args: List of `Tensor` arguments to be passed to the function `f`.
        Gradients are computed with respect to these arguments.
      grad_ys: Optional. A `Tensor` with the same shape as the `Tensor` returned
        by `f` that contains the incoming gradients with respect to the result
        of `f`.

    Returns:
      grads: List containing gradients of `f` with respect to `args`. It has the
        same length as `args`.
    """
    if tf.executing_eagerly():
      grad_fn = tf.contrib.eager.gradients_function(f)
      if grad_ys is not None:
        grads = grad_fn(*args, dy=grad_ys)
      else:
        grads = grad_fn(*args)
    else:
      res = f(*args)
      grads = tf.gradients(res, args, grad_ys=grad_ys)
    return self.evaluate(grads)
Developer: asudomoeva, Project: probability, Lines: 25, Source: test_case.py


Example 18: _value_and_gradient

def _value_and_gradient(fn, *args):
  """Calls `fn` and computes the gradient of the result wrt `arg`."""
  if tf.executing_eagerly():
    v, g = tfe.value_and_gradients_function(fn)(args)
  else:
    v = fn(*args)
    g = tf.gradients(v, args)
  return v, g
Developer: asudomoeva, Project: probability, Lines: 8, Source: special_math_test.py


Example 19: test_container_not_supported_in_eager

  def test_container_not_supported_in_eager(self):
    if not tf.executing_eagerly():
      self.skipTest("Skipping test in graph mode.")

    container = ReuseVarsTest.VariableContainer("name")
    with self.assertRaisesRegexp(ValueError,
                                 ".* not supported in eager mode .*"):
      container.method_with_reuse()
Developer: ccchang0111, Project: sonnet, Lines: 8, Source: util_test.py


Example 20: grad_potential

  def grad_potential(self, position, check_numerics=True):
    """Get gradient of potential function at current location."""

    if tf.executing_eagerly():
      grad = tfe.gradients_function(self.potential)(position)[0]
    else:
      grad = tf.gradients(self.potential(position), position)[0]

    return grad
Developer: Ajaycs99, Project: tensorflow, Lines: 9, Source: l2hmc.py

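A pattern that recurs throughout the examples above (see Examples 10, 17, 18 and 20) is branching on tf.executing_eagerly() to select the matching gradient API. Below is a minimal sketch of that dispatch, assuming TensorFlow 1.x; loss_fn and x are hypothetical placeholders:

import tensorflow as tf

def gradient_of(loss_fn, x):
  """Sketch: dispatch between the eager and graph gradient APIs."""
  if tf.executing_eagerly():
    # Eager mode: record operations on a tape, then differentiate.
    with tf.GradientTape() as tape:
      tape.watch(x)
      loss = loss_fn(x)
    return tape.gradient(loss, x)
  else:
    # Graph mode: add symbolic gradient ops to the current graph.
    return tf.gradients(loss_fn(x), x)[0]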


Note: The tensorflow.executing_eagerly examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their original authors, and copyright remains with those authors; consult each project's License before redistributing or using the code, and do not reproduce this article without permission.

