Python resource_variable_ops.var_is_initialized_op Function Code Examples


This article collects typical usage examples of the Python function tensorflow.python.ops.resource_variable_ops.var_is_initialized_op. If you have been wondering what var_is_initialized_op does, how to call it, or what real-world uses look like, the curated code examples below should help.



The following presents 15 code examples of the var_is_initialized_op function, sorted by popularity by default.
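Before the excerpts, here is a minimal sketch of the call pattern they all share: pass a variable's resource handle to var_is_initialized_op and it returns a boolean tensor. The sketch is illustrative only; it assumes a TensorFlow build with eager execution enabled, and note that resource_variable_ops lives under tensorflow.python, an internal namespace rather than a stable public API.

 # Minimal sketch (assumption: TensorFlow with eager execution enabled;
 # resource_variable_ops is an internal module and may change between releases).
 import tensorflow as tf
 from tensorflow.python.ops import resource_variable_ops

 v = tf.Variable(1.0)  # created eagerly, so it is initialized immediately
 is_init = resource_variable_ops.var_is_initialized_op(v.handle)
 print(is_init.numpy())  # True: the op returns a scalar boolean tensor

In graph mode the same call returns a symbolic boolean tensor, which is why several of the examples below feed it into control_flow_ops.cond or evaluate it in a session.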

Example 1: initialize_variables

 def initialize_variables():
   for v, init in initializer_map.items():
     with ops.init_scope():
       if resource_variable_ops.var_is_initialized_op(v.handle):
         # Ignore variables which are already initialized at trace time.
         continue
     v.assign(lift_to_graph.lift_to_graph(
         [init], ops.get_default_graph())[init])
Author: kylin9872, Project: tensorflow, Lines: 8, Source: def_function.py


Example 2: __call__

 def __call__(self, *args):
   initialized = [resource_variable_ops.var_is_initialized_op(
       v.handle).numpy() for v in self._call_fn.variables]
   if all(x for x in initialized):
     return self._call_fn(*args)
   elif all(not x for x in initialized):
     return self._init_fn(*args)
   else:
     raise ValueError("Some, but not all, variables are initialized.")
Author: ilya-edrenkin, Project: tensorflow, Lines: 9, Source: graph_callable.py


Example 3: variable_creator_scope

 def variable_creator_scope(unused_next_creator, **kwargs):
   kwargs["initialize"] = is_chief
   with ops.device(
       "/job:%s/task:%s" % (ps_job_name, ps_index[0] % num_ps_tasks)):
     ps_index[0] += 1
     v = SharedVariable(**kwargs)
     if not is_chief:
       while not resource_variable_ops.var_is_initialized_op(v.handle):
         time.sleep(10)
     return v
Author: Ajaycs99, Project: tensorflow, Lines: 10, Source: parameter_server.py


Example 4: testGPU

  def testGPU(self):
    with self.test_session(use_gpu=True) as sess:
      abc = variable_scope.get_variable(
          "abc",
          shape=[1],
          initializer=init_ops.ones_initializer(),
          use_resource=True)

      sess.run(variables.global_variables_initializer())
      self.assertEqual(
          resource_variable_ops.var_is_initialized_op(abc.handle).eval(), True)
      print(sess.run(abc))
Author: piyushjaiswal98, Project: tensorflow, Lines: 12, Source: resource_variable_ops_test.py


Example 5: fn_with_cond

 def fn_with_cond(*inner_args, **inner_kwds):
   """Conditionally runs initialization if it's needed."""
   condition = True
   for variable in self._created_variables:
     condition = condition and resource_variable_ops.var_is_initialized_op(
         variable.handle)
   # We want to call stateless_fn if possible because it avoids recomputing
   # potentially expensive initializers.
   return control_flow_ops.cond(
       condition,
       lambda: self._stateless_fn(*inner_args, **inner_kwds),
       _call_concrete(self._concrete_stateful_fn, inner_args, inner_kwds))
Author: ThunderQi, Project: tensorflow, Lines: 12, Source: def_function.py


Example 6: fn_with_cond

 def fn_with_cond(*inner_args, **inner_kwds):
   """Conditionally runs initialization if it's needed."""
   condition = True
   for variable in created_variables:
     condition = condition and resource_variable_ops.var_is_initialized_op(
         variable.handle)
   # We want to call second_graph_function if possible because it avoids
   # recomputing potentially expensive initializers.
   return control_flow_ops.cond(
       condition,
       lambda: second_graph_function(*inner_args, **inner_kwds),
       lambda: first_concrete(*inner_args, **inner_kwds))
Author: daiwk, Project: tensorflow, Lines: 12, Source: def_function.py


Example 7: testGPU

  def testGPU(self):
    with test_util.use_gpu():
      abc = variable_scope.get_variable(
          "abc",
          shape=[1],
          initializer=init_ops.ones_initializer(),
          use_resource=True)

      self.evaluate(variables.global_variables_initializer())
      self.assertEqual(
          self.evaluate(
              resource_variable_ops.var_is_initialized_op(abc.handle)),
          True)
Author: aeverall, Project: tensorflow, Lines: 13, Source: resource_variable_ops_test.py


Example 8: fn_with_cond

 def fn_with_cond(*inner_args, **inner_kwds):
   """Conditionally runs initialization if it's needed."""
   condition = True
   for wr in self._created_variables:
     variable = wr()
     if variable is None:
       raise ValueError(
           "A tf.Variable created inside your tf.function has been"
           " garbage-collected. Your code needs to keep Python references"
           " to variables created inside `tf.function`s.\n"
           "\n"
           "A common way to raise this error is to create and return a"
           " variable only referenced inside your function:\n"
           "\n"
           "@tf.function\n"
           "def f():\n"
           "  v = tf.Variable(1.0)\n"
           "  return v\n"
           "\n"
           "v = f()  # Crashes with this error message!\n"
           "\n"
           "The reason this crashes is that @tf.function annotated"
           " function returns a **`tf.Tensor`** with the **value** of the"
           " variable when the function is called rather than the"
           " variable instance itself. As such there is no code holding a"
           " reference to the `v` created inside the function and Python"
           " garbage collects it.\n"
           "\n"
           "The simplest way to fix this issue is to create variables"
           " outside the function and capture them:\n"
           "\n"
           "v = tf.Variable(1.0)\n"
           "\n"
           "@tf.function\n"
           "def f():\n"
           "  return v\n"
           "\n"
           "f()  # <tf.Tensor: ... numpy=1.>\n"
           "v.assign_add(1.)\n"
           "f()  # <tf.Tensor: ... numpy=2.>")
     condition = math_ops.logical_and(
         condition, resource_variable_ops.var_is_initialized_op(
             variable.handle))
   # We want to call stateless_fn if possible because it avoids recomputing
   # potentially expensive initializers.
   return control_flow_ops.cond(
       condition,
       lambda: self._stateless_fn(*inner_args, **inner_kwds),
       functools.partial(self._concrete_stateful_fn._filtered_call,  # pylint: disable=protected-access
                         inner_args, inner_kwds))
Author: kylin9872, Project: tensorflow, Lines: 50, Source: def_function.py


Example 9: testFunctionInitializationFunction

  def testFunctionInitializationFunction(self):

    state = []

    @def_function.function
    def fn(x):
      if not state:
        state.append(variables.Variable(2.0))
      return state[0] * x

    init_fn = fn.get_initialization_function(constant_op.constant(1.0))
    self.assertEqual(len(state), 1)
    self.assertFalse(
        resource_variable_ops.var_is_initialized_op(state[0].handle))
    init_fn()
    self.assertEqual(state[0].numpy(), 2.0)
Author: rmlarsen, Project: tensorflow, Lines: 16, Source: def_function_test.py


Example 10: __call__

  def __call__(self, *args):
    nest.assert_same_structure(self.shape_and_dtypes, args, check_types=False)
    if not all([
        shape.is_compatible_with(arg.shape)
        for shape, arg in zip(self.flattened_shapes, nest.flatten(args))
    ]):
      raise ValueError(
          "Declared shapes do not match argument shapes: Expected %s, found %s."
          % (self.flattened_shapes, [arg.shape for arg in nest.flatten(args)]))

    initialized = [resource_variable_ops.var_is_initialized_op(
        v.handle).numpy() for v in self._call_fn.variables]
    if all(x for x in initialized):
      return self._call_fn(*args)
    elif all(not x for x in initialized):
      return self._init_fn(*args)
    else:
      raise ValueError("Some, but not all, variables are initialized.")
Author: rajeev921, Project: tensorflow, Lines: 18, Source: graph_callable.py


Example 11: fn_with_cond

 def fn_with_cond(*inner_args, **inner_kwds):
   """Conditionally runs initialization if it's needed."""
   condition = True
   for wr in self._created_variables:
     variable = wr()
     if variable is None:
       raise ValueError(
           "Variable created in a tf.function garbage-collected. Code needs"
           " to keep python references to variables created in a"
           " tf.function.")
     condition = math_ops.logical_and(
         condition, resource_variable_ops.var_is_initialized_op(
             variable.handle))
   # We want to call stateless_fn if possible because it avoids recomputing
   # potentially expensive initializers.
   return control_flow_ops.cond(
       condition,
       lambda: self._stateless_fn(*inner_args, **inner_kwds),
       _call_concrete(self._concrete_stateful_fn, inner_args, inner_kwds))
Author: becster, Project: tensorflow, Lines: 19, Source: def_function.py


Example 12: __init__


#......... (part of the code omitted here) .........

    self._trainable = trainable
    self._save_slice_info = None
    # Store the graph key so optimizers know how to only retrieve variables from
    # this graph.
    self._graph_key = ops.get_default_graph()._graph_key  # pylint: disable=protected-access
    with ops.init_scope():
      self._in_graph_mode = not context.executing_eagerly()
      with ops.name_scope(name, "Variable", []
                          if init_from_fn else [initial_value]) as name:
        # pylint: disable=protected-access
        handle_name = ops._name_from_scope_name(name)
        shared_name = handle_name
        if init_from_fn:
          # Use attr_scope and device(None) to simulate the behavior of
          # colocate_with when the variable we want to colocate with doesn't
          # yet exist.
          if self._in_graph_mode:
            with ops.name_scope("Initializer"), ops.device(None):
              initial_value = ops.convert_to_tensor(
                  initial_value(), name="initial_value", dtype=dtype)
            self._handle = _eager_safe_variable_handle(
                shape=initial_value.get_shape(),
                dtype=initial_value.dtype.base_dtype,
                shared_name=shared_name,
                name=name,
                graph_mode=self._in_graph_mode)
            self._shape = initial_value.get_shape()
          else:
            initial_value = initial_value()
            with ops.name_scope("Initializer"):
              initial_value = ops.convert_to_tensor(
                  initial_value, name="initial_value", dtype=dtype)
            self._handle = _eager_safe_variable_handle(
                shape=initial_value.get_shape(),
                dtype=initial_value.dtype.base_dtype,
                shared_name=shared_name,
                name=name,
                graph_mode=False)
            self._shape = initial_value.get_shape()
        # pylint: enable=protected-access

        # Or get the initial value from a Tensor or Python object.
        else:
          with ops.name_scope("Initializer"):
            initial_value = ops.convert_to_tensor(
                initial_value, name="initial_value", dtype=dtype)
          # pylint: disable=protected-access
          if (self._in_graph_mode and initial_value is not None and
              initial_value.op._get_control_flow_context() is not None):
            raise ValueError(
                "Initializer for variable %s is from inside a control-flow "
                "construct, such as a loop or conditional. When creating a "
                "variable inside a loop or conditional, use a lambda as the "
                "initializer." % name)
          # pylint: enable=protected-access
          self._handle = _eager_safe_variable_handle(
              shape=initial_value.get_shape(),
              dtype=initial_value.dtype.base_dtype,
              shared_name=shared_name,
              name=name,
              graph_mode=self._in_graph_mode)
          self._shape = initial_value.get_shape()

        self._unique_id = shared_name
        self._initial_value = initial_value if self._in_graph_mode else None
        self._handle_name = handle_name + ":0"
        self._dtype = initial_value.dtype.base_dtype
        self._constraint = constraint

        if self._in_graph_mode:
          with ops.name_scope("IsInitialized"):
            self._is_initialized_op = (
                resource_variable_ops.var_is_initialized_op(self._handle))
          if initial_value is not None:
            with ops.name_scope("Assign") as n, ops.colocate_with(self._handle):
              self._initializer_op = (
                  resource_variable_ops.assign_variable_op(
                      self._handle,
                      self._try_guard_against_uninitialized_dependencies(
                          initial_value),
                      name=n))
          with ops.name_scope("Read"), ops.colocate_with(self._handle):
            # Manually assign reads to the handle's device to avoid log
            # messages.
            with ops.device(self._handle.device):
              value = self._read_variable_op()
            self._graph_element = value
            self._cached_value = None
        else:
          if initialize:
            resource_variable_ops.assign_variable_op(self._handle,
                                                     initial_value)
          self._is_initialized_op = None
          self._initializer_op = None
          self._graph_element = None
          self._cached_value = None

    self._handle_deleter = None
    self._cached_shape_as_list = None
Author: Ajaycs99, Project: tensorflow, Lines: 101, Source: parameter_server.py


Example 13: __init__


#......... (part of the code omitted here) .........
    init_from_fn = callable(initial_value)

    if constraint is not None and not callable(constraint):
      raise ValueError("The `constraint` argument must be a callable.")

    if isinstance(initial_value, trackable.CheckpointInitialValue):
      self._maybe_initialize_trackable()
      self._update_uid = initial_value.checkpoint_position.restore_uid
      initial_value = initial_value.wrapped_value

    if trainable is None:
      trainable = True
    self._trainable = trainable
    self._save_slice_info = None
    self._initial_value = None
    self._initializer_op = None
    self._is_initialized_op = None
    self._graph_element = None
    self._cached_value = None
    # Store the graph key so optimizers know how to only retrieve variables from
    # this graph. Guaranteed to be the same as the eager graph_key.
    self._graph_key = ops.get_default_graph()._graph_key  # pylint: disable=protected-access
    with ops.name_scope(name, "Variable", []
                        if init_from_fn else [initial_value]) as name:
      # pylint: disable=protected-access
      with ops.init_scope():
        handle_name = ops._name_from_scope_name(name)
        unique_id = "%s_%d" % (handle_name, ops.uid())
        shared_name = context.shared_name(unique_id)
      with ops.name_scope("Initializer"), ops.device(None):
        initial_value = ops.convert_to_tensor(
            initial_value() if init_from_fn else initial_value,
            name="initial_value", dtype=dtype)
      with ops.init_scope():
        self._handle = resource_variable_ops.eager_safe_variable_handle(
            initial_value=initial_value,
            shared_name=shared_name,
            name=name,
            graph_mode=self._in_graph_mode)
      self._shape = initial_value.shape
      self._unique_id = unique_id
      self._handle_name = handle_name + ":0"
      self._dtype = initial_value.dtype.base_dtype
      self._constraint = constraint
      assert initial_value is not None
      if self._in_graph_mode:
        with ops.init_scope():
          outer_graph = ops.get_default_graph()
        func_graph = ops.get_default_graph()
        function_placeholders = (
            func_graph.inputs + func_graph.internal_captures)
        placeholder_ops = set(
            [tensor.op for tensor in function_placeholders])
        lifted_initializer = lift_to_graph.lift_to_graph(
            [initial_value], outer_graph,
            disallowed_placeholders=placeholder_ops)[initial_value]
        with ops.init_scope():
          self._initial_value = lifted_initializer
          with ops.name_scope("IsInitialized"):
            self._is_initialized_op = (
                resource_variable_ops.var_is_initialized_op(self._handle))
          if initial_value is not None:
            with ops.name_scope("Assign") as n, ops.colocate_with(self._handle):
              self._initializer_op = resource_variable_ops.assign_variable_op(
                  self._handle, lifted_initializer, name=n)
          with ops.name_scope("Read"), ops.colocate_with(self._handle):
            # Manually assign reads to the handle's device to avoid log
            # messages.
            with ops.device(self._handle.device):
              value = self._read_variable_op()
            self._graph_element = value
          ops.add_to_collection(ops.GraphKeys.GLOBAL_VARIABLES, self)
      else:
        if add_initializers_to is not None:
          add_initializers_to[self] = initial_value
        def assign_fn():
          with ops.name_scope("Assign") as n, ops.colocate_with(self._handle):
            resource_variable_ops.assign_variable_op(
                self._handle,
                initial_value,
                name=n)
            # Returning values to keep tf.cond happy.
          return ops.convert_to_tensor(1)
        def not_assign_fn():
          return ops.convert_to_tensor(0)
        # Note: this cond is always guaranteed to run because we're inside a
        # defun which will insert automatic control dependencies.
        control_flow_ops.cond(
            resource_variable_ops.var_is_initialized_op(self._handle),
            not_assign_fn, assign_fn)

    # After the handle has been created, set up a way to clean it up when
    # executing eagerly. We'll hold the only reference to the deleter, so that
    # when this object is garbage collected the deleter will be too. This
    # means ResourceVariables can be part of reference cycles without those
    # cycles being uncollectable.
    if not self._in_graph_mode:
      self._handle_deleter = resource_variable_ops.EagerResourceDeleter(
          handle=self._handle, handle_device=self._handle.device)
    self._cached_shape_as_list = None
Author: kylin9872, Project: tensorflow, Lines: 101, Source: def_function.py


Example 14: __init__


#......... (part of the code omitted here) .........
        uniquified automatically.
      dtype: If set, initial_value will be converted to the given type.
        If None, either the datatype will be kept (if initial_value is
       a Tensor) or float32 will be used (if it is a Python object convertible
       to a Tensor).
      constraint: An optional projection function to be applied to the variable
        after being updated by an `Optimizer` (e.g. used to implement norm
        constraints or value constraints for layer weights). The function must
        take as input the unprojected Tensor representing the value of the
        variable and return the Tensor for the projected value
        (which must have the same shape). Constraints are not safe to
        use when doing asynchronous distributed training.

    Raises:
      ValueError: If the initial value is not specified, or does not have a
        shape and `validate_shape` is `True`.
      RuntimeError: If called outside of a function definition.
    """
    if context.executing_eagerly():
      raise RuntimeError(
          "UnliftedInitializerVariable should not be created "
          "outside of functions.")
    with ops.init_scope():
      if not context.executing_eagerly():
        raise RuntimeError(
            "UnliftedInitializerVariable does not support legacy graph mode.")
    self._in_graph_mode = False
    if initial_value is None:
      raise ValueError("initial_value must be specified.")
    init_from_fn = callable(initial_value)

    if constraint is not None and not callable(constraint):
      raise ValueError("The `constraint` argument must be a callable.")

    if isinstance(initial_value, checkpointable.CheckpointInitialValue):
      self._maybe_initialize_checkpointable()
      self._update_uid = initial_value.checkpoint_position.restore_uid
      initial_value = initial_value.wrapped_value

    self._trainable = trainable
    self._save_slice_info = None
    self._initial_value = None
    self._initializer_op = None
    self._is_initialized_op = None
    self._graph_element = None
    self._cached_value = None
    # Store the graph key so optimizers know how to only retrieve variables from
    # this graph. Guaranteed to be the same as the eager graph_key.
    self._graph_key = ops.get_default_graph()._graph_key  # pylint: disable=protected-access
    with ops.name_scope(name, "Variable", []
                        if init_from_fn else [initial_value]) as name:
      # pylint: disable=protected-access
      with ops.init_scope():
        assert context.executing_eagerly()
        shared_name = ops._name_from_scope_name(name)
        shared_name = "%s_%d" % (shared_name, ops.uid())
      # Use attr_scope and device(None) to simulate the behavior of
      # colocate_with when the variable we want to colocate with doesn't
      # yet exist.
      with ops.name_scope("Initializer"), ops.device(None):
        initial_value = ops.convert_to_tensor(
            initial_value() if init_from_fn else initial_value,
            name="initial_value", dtype=dtype)
      with ops.init_scope():
        self._handle = resource_variable_ops.eager_safe_variable_handle(
            shape=initial_value.get_shape(),
            dtype=initial_value.dtype.base_dtype,
            shared_name=shared_name,
            name=name,
            graph_mode=False)
      self._shape = initial_value.shape
      self._unique_id = shared_name
      self._handle_name = shared_name + ":0"
      self._dtype = initial_value.dtype.base_dtype
      self._constraint = constraint
      assert initial_value is not None
      def assign_fn():
        with ops.name_scope("Assign") as n, ops.colocate_with(self._handle):
          resource_variable_ops.assign_variable_op(
              self._handle,
              initial_value,
              name=n)
        # Returning values to keep tf.cond happy.
        return ops.convert_to_tensor(1)
      def not_assign_fn():
        return ops.convert_to_tensor(0)
      # Note: this cond is always guaranteed to run because we're inside a defun
      # which will insert automatic control dependencies.
      control_flow_ops.cond(
          resource_variable_ops.var_is_initialized_op(self._handle),
          not_assign_fn, assign_fn)

    # After the handle has been created, set up a way to clean it up when
    # executing eagerly. We'll hold the only reference to the deleter, so that
    # when this object is garbage collected the deleter will be too. This
    # means ResourceVariables can be part of reference cycles without those
    # cycles being uncollectable.
    self._handle_deleter = resource_variable_ops.EagerResourceDeleter(
        handle=self._handle, handle_device=self._handle.device)
    self._cached_shape_as_list = None
Author: ThunderQi, Project: tensorflow, Lines: 101, Source: def_function.py


Example 15: __init__


#......... (part of the code omitted here) .........
        `AUTO` and the current `DistributionStrategy` chooses
        when to synchronize. If `synchronization` is set to `ON_READ`,
        `trainable` must not be set to `True`.
      aggregation: Indicates how a distributed variable will be aggregated.
        Accepted values are constants defined in the class
        `tf.VariableAggregation`.
      shape: (optional) The shape of this variable. If None, the shape of
        `initial_value` will be used. When setting this argument to
        `tf.TensorShape(None)` (representing an unspecified shape), the variable
        can be assigned with values of different shapes.

    Raises:
      ValueError: If the initial value is not specified, or does not have a
        shape and `validate_shape` is `True`.
      RuntimeError: If called outside of a function definition.
    """
    if not ops.inside_function():
      # If we've been init_scope()d out of the function definition nothing to do
      # here; we can't really do the capturing or conditional logic.
      resource_variable_ops.ResourceVariable.__init__(
          self, initial_value=initial_value, trainable=trainable,
          caching_device=caching_device, name=name, dtype=dtype,
          constraint=constraint)
      return
    if initial_value is None:
      raise ValueError("initial_value must be specified.")
    init_from_fn = callable(initial_value)

    if constraint is not None and not callable(constraint):
      raise ValueError("The `constraint` argument must be a callable.")

    if isinstance(initial_value, trackable.CheckpointInitialValue):
      self._maybe_initialize_trackable()
      self._update_uid = initial_value.checkpoint_position.restore_uid
      initial_value = initial_value.wrapped_value

    with ops.name_scope(name, "Variable", []
                        if init_from_fn else [initial_value]) as name:
      with ops.name_scope("Initializer"), ops.device(None):
        initial_value = ops.convert_to_tensor(
            initial_value() if init_from_fn else initial_value,
            name="initial_value", dtype=dtype)
      assert initial_value is not None

      # Don't use `shape or initial_value.shape` since TensorShape has
      # overridden `__bool__`.
      if shape is None:
        shape = initial_value.shape

      # Use the constructor for UninitializedVariable to start.
      super(UnliftedInitializerVariable, self).__init__(
          trainable=trainable,
          caching_device=caching_device,
          name=name,
          shape=shape,
          dtype=initial_value.dtype,
          constraint=constraint,
          synchronization=synchronization,
          aggregation=aggregation,
          extra_handle_data=initial_value,
          **unused_kwargs)

      if self._in_graph_mode:
        with ops.init_scope():
          outer_graph = ops.get_default_graph()
        func_graph = ops.get_default_graph()
        function_placeholders = (
            func_graph.inputs + func_graph.internal_captures)
        placeholder_ops = set(
            [tensor.op for tensor in function_placeholders])
        lifted_initializer = lift_to_graph.lift_to_graph(
            [initial_value], outer_graph,
            disallowed_placeholders=placeholder_ops)[initial_value]
        with ops.init_scope():
          self._initial_value = lifted_initializer
          with ops.name_scope("IsInitialized"):
            self._is_initialized_op = (
                resource_variable_ops.var_is_initialized_op(self._handle))
          if initial_value is not None:
            with ops.name_scope("Assign") as n, ops.colocate_with(self._handle):
              self._initializer_op = resource_variable_ops.assign_variable_op(
                  self._handle, lifted_initializer, name=n)
      else:
        if add_initializers_to is not None:
          add_initializers_to[self] = initial_value
        def assign_fn():
          with ops.name_scope("Assign") as n, ops.colocate_with(self._handle):
            resource_variable_ops.assign_variable_op(
                self._handle,
                initial_value,
                name=n)
            # Returning values to keep tf.cond happy.
          return ops.convert_to_tensor(1)
        def not_assign_fn():
          return ops.convert_to_tensor(0)
        # Note: this cond is always guaranteed to run because we're inside a
        # defun which will insert automatic control dependencies.
        control_flow_ops.cond(
            resource_variable_ops.var_is_initialized_op(self._handle),
            not_assign_fn, assign_fn)
Author: aritratony, Project: tensorflow, Lines: 101, Source: def_function.py
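
A pattern worth distilling from Examples 13-15: inside a function graph, the initial assignment is wrapped in control_flow_ops.cond so that it only runs when the variable is still uninitialized. Below is a condensed sketch of that guard; guarded_assign is an illustrative name, not a TensorFlow API.

 # Condensed sketch of the cond-guarded initialization from Examples 13-15.
 # guarded_assign is a hypothetical helper name, not part of TensorFlow.
 from tensorflow.python.framework import ops
 from tensorflow.python.ops import control_flow_ops
 from tensorflow.python.ops import resource_variable_ops

 def guarded_assign(handle, initial_value):
   """Assign initial_value only if the variable is not yet initialized."""
   def assign_fn():
     resource_variable_ops.assign_variable_op(handle, initial_value)
     return ops.convert_to_tensor(1)  # both cond branches must return a value
   def not_assign_fn():
     return ops.convert_to_tensor(0)
   return control_flow_ops.cond(
       resource_variable_ops.var_is_initialized_op(handle),
       not_assign_fn, assign_fn)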



Note: The tensorflow.python.ops.resource_variable_ops.var_is_initialized_op examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors, and distribution or use should follow each project's license. Do not reproduce without permission.

