
Python variables.global_variables Function Code Examples


This article collects and summarizes typical usage examples of the tensorflow.python.ops.variables.global_variables function in Python. If you are trying to work out what global_variables does, how to call it, or what it looks like in real code, the curated examples below should help.



The following presents 20 code examples of the global_variables function, drawn from open-source projects and sorted roughly by popularity.
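
Before the examples, a minimal sketch of the behavior they all exercise may help. It uses the public tf.compat.v1 alias of the same function; the alias and the TF1-style graph-mode setup are assumptions made for this illustration, whereas the tests below import the internal tensorflow.python.ops.variables module directly:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()  # the test snippets below all assume graph mode

# A Variable is added to the GLOBAL_VARIABLES collection by default...
v = tf.Variable(1.0, name="v")
# ...unless it is placed in other collections explicitly.
w = tf.Variable(2.0, name="w",
                collections=[tf.GraphKeys.LOCAL_VARIABLES])

# global_variables() returns the contents of the GLOBAL_VARIABLES
# collection, so v is listed and w is not.
print([var.name for var in tf.global_variables()])  # ['v:0']

The tests below lean on exactly this property: they count or inspect global_variables() before and after building ops to verify that variables were created once, reused, or kept out of the global collection.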

Example 1: testReuse

  def testReuse(self):

    def f(x):
      return core_layers.dense(x, self.CHANNELS // 2)

    def g(x):
      return core_layers.dense(x, self.CHANNELS // 2)

    x = random_ops.random_uniform(
        [self.BATCH_SIZE, self.CHANNELS], dtype=dtypes.float32)
    x1, x2 = array_ops.split(x, 2, axis=-1)

    with variable_scope.variable_scope("test"):
      y1, y2 = rev_block_lib.rev_block(x1, x2, f, g, num_layers=self.NUM_LAYERS)

    num_vars_before = len(variables.global_variables())

    with variable_scope.variable_scope("test", reuse=True):
      y1, y2 = rev_block_lib.rev_block(x1, x2, f, g, num_layers=self.NUM_LAYERS)

    num_vars_after = len(variables.global_variables())
    self.assertEqual(num_vars_before, num_vars_after)

    loss = math_ops.reduce_mean(y1 + y2)
    _ = gradients_impl.gradients(loss,
                                 [x] + variables.trainable_variables())

    with variable_scope.variable_scope("test", reuse=True):
      y1, y2 = rev_block_lib.rev_block(x1, x2, f, g, num_layers=self.NUM_LAYERS)

    num_vars_after = len(variables.global_variables())
    self.assertEqual(num_vars_before, num_vars_after)
Contributor: clsung | Project: tensorflow | Lines: 32 | Source: rev_block_lib_test.py


Example 2: testFunctionalReuseFromScope

 def testFunctionalReuseFromScope(self):
   inputs = variables.Variable(
       np.random.random((5, 4, 3, 6)), dtype=dtypes.float32)
   epsilon = 1e-3
   training = array_ops.placeholder(dtype='bool')
   with variable_scope.variable_scope('scope'):
     _ = normalization_layers.batch_norm(
         inputs, axis=-1, momentum=0.9, epsilon=epsilon, training=training)
     self.assertEqual(len(variables.global_variables()), 5)
   with variable_scope.variable_scope('scope', reuse=True):
     _ = normalization_layers.batch_norm(
         inputs, axis=-1, momentum=0.9, epsilon=epsilon, training=training)
     self.assertEqual(len(variables.global_variables()), 5)
Contributor: adityaatluri | Project: tensorflow | Lines: 13 | Source: normalization_test.py


Example 3: testCollectionsWithScope

  def testCollectionsWithScope(self):
    with self.cached_session():
      with ops.name_scope("scope_1"):
        var_x = variables.VariableV1(2.0)
      with ops.name_scope("scope_2"):
        var_y = variables.VariableV1(2.0)

      self.assertEqual([var_x, var_y], variables.global_variables())
      self.assertEqual([var_x], variables.global_variables("scope_1"))
      self.assertEqual([var_y], variables.global_variables("scope_2"))

      self.assertEqual([var_x, var_y], variables.trainable_variables())
      self.assertEqual([var_x], variables.trainable_variables("scope_1"))
      self.assertEqual([var_y], variables.trainable_variables("scope_2"))
Contributor: adit-chandra | Project: tensorflow | Lines: 14 | Source: variables_test.py


Example 4: testNotInLocalVariables

 def testNotInLocalVariables(self):
   with self.test_session():
     with variable_scope.variable_scope('A'):
       a = variables_lib2.model_variable('a', [5])
       self.assertTrue(a in variables_lib.global_variables())
       self.assertTrue(a in ops.get_collection(ops.GraphKeys.MODEL_VARIABLES))
       self.assertFalse(a in variables_lib.local_variables())
Contributor: AliMiraftab | Project: tensorflow | Lines: 7 | Source: variables_test.py


Example 5: testPrepareSessionWithReadyForLocalInitOp

 def testPrepareSessionWithReadyForLocalInitOp(self):
   with ops.Graph().as_default():
     v = variables.Variable(1, name="v")
     w = variables.Variable(
         v,
         trainable=False,
         collections=[ops.GraphKeys.LOCAL_VARIABLES],
         name="w")
     with self.test_session():
       self.assertEqual(False, variables.is_variable_initialized(v).eval())
       self.assertEqual(False, variables.is_variable_initialized(w).eval())
     sm2 = session_manager.SessionManager(
         ready_op=variables.report_uninitialized_variables(),
         ready_for_local_init_op=variables.report_uninitialized_variables(
             variables.global_variables()),
         local_init_op=w.initializer)
     sess = sm2.prepare_session("", init_op=v.initializer)
     self.assertEqual(
         True,
         variables.is_variable_initialized(
             sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
     self.assertEqual(
         True,
         variables.is_variable_initialized(
             sess.graph.get_tensor_by_name("w:0")).eval(session=sess))
     self.assertEqual(1, sess.run(v))
     self.assertEqual(1, sess.run(w))
Contributor: AlbertXiebnu | Project: tensorflow | Lines: 27 | Source: session_manager_test.py


Example 6: testStochasticVariablesWithConstantInitializer

  def testStochasticVariablesWithConstantInitializer(self):
    shape = (10, 20)
    with variable_scope.variable_scope(
        "stochastic_variables",
        custom_getter=sv.make_stochastic_variable_getter(
            dist_cls=dist.NormalWithSoftplusSigma,
            dist_kwargs={"validate_args": True},
            param_initializers={
                "mu": np.ones(shape) * 4.,
                "sigma": np.ones(shape) * 2.
            })):
      v = variable_scope.get_variable("sv")

    for var in variables.global_variables():
      if "mu" in var.name:
        mu_var = var
      if "sigma" in var.name:
        sigma_var = var

    v = ops.convert_to_tensor(v)
    with self.test_session() as sess:
      sess.run(variables.global_variables_initializer())
      self.assertAllEqual(np.ones(shape) * 4., sess.run(mu_var))
      self.assertAllEqual(np.ones(shape) * 2., sess.run(sigma_var))
      self.assertEqual(shape, sess.run(v).shape)
Contributor: AliMiraftab | Project: tensorflow | Lines: 25 | Source: stochastic_variables_test.py


Example 7: testWaitForSessionLocalInit

  def testWaitForSessionLocalInit(self):
    server = server_lib.Server.create_local_server()
    with ops.Graph().as_default() as graph:
      v = variables.Variable(1, name="v")
      w = variables.Variable(
          v,
          trainable=False,
          collections=[ops.GraphKeys.LOCAL_VARIABLES],
          name="w")
      sm = session_manager.SessionManager(
          graph=graph,
          ready_op=variables.report_uninitialized_variables(),
          ready_for_local_init_op=variables.report_uninitialized_variables(
              variables.global_variables()),
          local_init_op=w.initializer)

      # Initialize v but not w
      s = session_lib.Session(server.target, graph=graph)
      s.run(v.initializer)

      sess = sm.wait_for_session(server.target, max_wait_secs=3)
      self.assertEqual(
          True,
          variables.is_variable_initialized(
              sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
      self.assertEqual(
          True,
          variables.is_variable_initialized(
              sess.graph.get_tensor_by_name("w:0")).eval(session=sess))
      self.assertEqual(1, sess.run(v))
      self.assertEqual(1, sess.run(w))
Contributor: AnishShah | Project: tensorflow | Lines: 31 | Source: session_manager_test.py


Example 8: test_gradients_are_computed_with_mean_reduction

  def test_gradients_are_computed_with_mean_reduction(self):
    with self.test_session() as session:
      tower_specs = replicate_model_fn._get_loss_towers(
          self.model_fn,
          mode=model_fn_lib.ModeKeys.EVAL,
          features=[[0.6], [1.6]],
          labels=[[0.6], [0.6]],
          params=None,
          loss_reduction=losses.Reduction.MEAN,
          config=None,
          devices=['/gpu:0', '/gpu:1'],
          local_ps_devices=['/gpu:0'],
          name_scope_pattern='test_tower_{}')
      session.run(variables.global_variables_initializer())

      self.assertEqual(len(tower_specs), 2)

      self.assertEqual('/device:GPU:0', tower_specs[0].loss.device)
      self.assertEqual('averaged_loss:0', tower_specs[0].loss.name)
      self.assertEqual(0.5, session.run(tower_specs[0].loss))

      self.assertEqual('/device:GPU:1', tower_specs[1].loss.device)
      self.assertEqual('test_tower_1/averaged_loss:0', tower_specs[1].loss.name)
      # The input batch for the second tower had a loss that is 1.0
      # bigger: 0.6 vs 1.6.
      self.assertEqual(1.0, session.run(tower_specs[1].loss))

      self.assertEqual(1, len(variables.global_variables()))
      self.assertEqual(1, len(variables.trainable_variables()))

      with variable_scope.variable_scope('', reuse=True):
        c = variable_scope.get_variable('c', dtype=dtypes.float64)
        self.assertEqual(0.25, session.run(c))
Contributor: AbhinavJain13 | Project: tensorflow | Lines: 33 | Source: replicate_model_fn_test.py


Example 9: _get_saver

def _get_saver():
  """Lazy init and return saver."""
  saver = _get_first_op_from_collection(ops.GraphKeys.SAVERS)
  if saver is None and variables.global_variables():
    saver = tf_saver.Saver()
    ops.add_to_collection(ops.GraphKeys.SAVERS, saver)
  return saver
Contributor: HKUST-SING | Project: tensorflow | Lines: 7 | Source: graph_actions.py


Example 10: testAverages

  def testAverages(self):
    with self.test_session() as session:
      scale = 2.
      grad = array_ops.ones([3, 4]) * scale
      log_norm = np.log(np.sqrt(scale**2 * grad.get_shape().num_elements()))
      grads_and_vars = [(grad, grad)]
      grads_and_vars = optimizers_lib.adaptive_clipping_fn(
          decay=0.5)(grads_and_vars)

      var_dict = {}
      for var in variables.global_variables():
        if var.name.startswith("AdaptiveMaxNorm"):
          var_dict[var.name.split(":")[0]] = var
      self.assertEqual(2, len(var_dict))
      moving_mean = var_dict["AdaptiveMaxNorm/mean"]
      moving_sq_mean = var_dict["AdaptiveMaxNorm/sq_mean"]
      variables.global_variables_initializer().run()
      mean, sq_mean = session.run([moving_mean, moving_sq_mean])
      self.assertEqual([0], mean)
      self.assertEqual([0], sq_mean)
      for i in range(20):
        mean, sq_mean, _ = session.run(
            [moving_mean, moving_sq_mean, grads_and_vars[0][0]])
        if i == 0:
          self.assertLess(mean, 0.9 * log_norm)
          self.assertLess(sq_mean, 0.9 * log_norm**2)

      self.assertAlmostEqual(float(mean), log_norm, places=4)
      self.assertAlmostEqual(float(sq_mean), log_norm**2, places=4)
Contributor: Jackhuang945 | Project: tensorflow | Lines: 29 | Source: optimizers_test.py


Example 11: testVariableReuse

  def testVariableReuse(self):

    def LinearWithReuse(input_tensor, reuse=None):
      size = input_tensor.shape.dims[1]
      with variable_scope.variable_scope("linear", reuse=reuse):
        w = variable_scope.get_variable(
            "w", shape=[size, size], dtype=input_tensor.dtype)
      return math_ops.matmul(input_tensor, w)

    @function.Defun(dtypes.float32)
    def Foo(inputs):
      inputs = array_ops.reshape(inputs, [32, 100])
      hidden = LinearWithReuse(inputs)
      return LinearWithReuse(hidden, reuse=True)

    input_op = array_ops.placeholder(shape=[32, 100], dtype=dtypes.float32)
    output_op = Foo(input_op)

    global_vars = variables.global_variables()
    self.assertEqual(len(global_vars), 1)
    self.assertEqual(global_vars[0].name, "linear/w:0")

    with session.Session() as sess:
      sess.run(variables.global_variables_initializer())
      output_val = sess.run(
          output_op, feed_dict={input_op: np.random.rand(32, 100)})
      self.assertEqual(output_val.shape, (32, 100))
Contributor: AbhinavJain13 | Project: tensorflow | Lines: 27 | Source: function_test.py


Example 12: testStochasticVariablesWithCallableInitializer

  def testStochasticVariablesWithCallableInitializer(self):
    shape = (10, 20)

    def sigma_init(shape, dtype, partition_info):
      _ = partition_info
      return array_ops.ones(shape, dtype=dtype) * 2.

    with variable_scope.variable_scope(
        "stochastic_variables",
        custom_getter=sv.make_stochastic_variable_getter(
            dist_cls=dist.NormalWithSoftplusScale,
            dist_kwargs={"validate_args": True},
            param_initializers={
                "loc": np.ones(
                    shape, dtype=np.float32) * 4.,
                "scale": sigma_init
            })):
      v = variable_scope.get_variable("sv", shape)

    for var in variables.global_variables():
      if "loc" in var.name:
        mu_var = var
      if "scale" in var.name:
        sigma_var = var

    v = ops.convert_to_tensor(v)
    with self.test_session() as sess:
      sess.run(variables.global_variables_initializer())
      self.assertAllEqual(np.ones(shape) * 4., sess.run(mu_var))
      self.assertAllEqual(np.ones(shape) * 2., sess.run(sigma_var))
      self.assertEqual(shape, sess.run(v).shape)
Contributor: 1000sprites | Project: tensorflow | Lines: 31 | Source: stochastic_variables_test.py


Example 13: testFunctionCallInDifferentVariableScopes

  def testFunctionCallInDifferentVariableScopes(self):

    @function.Defun(dtypes.float32)
    def Foo(inputs):
      var = variable_scope.get_variable(
          "var",
          shape=[10],
          dtype=dtypes.float32,
          initializer=init_ops.ones_initializer())
      return inputs + var

    input_op = array_ops.placeholder(shape=[10], dtype=dtypes.float32)
    with variable_scope.variable_scope("vs1"):
      out1_op = Foo(input_op)

    with variable_scope.variable_scope("vs2"):
      out2_op = Foo(input_op)

    global_vars = variables.global_variables()
    self.assertEqual(len(global_vars), 1)
    self.assertEqual(global_vars[0].name, "vs1/var:0")

    with session.Session() as sess:
      sess.run(variables.global_variables_initializer())
      out1, out2 = sess.run(
          [out1_op, out2_op], feed_dict={input_op: np.linspace(1, 10, 10)})
      self.assertAllEqual(out1, np.linspace(2, 11, 10))
      self.assertAllEqual(out2, np.linspace(2, 11, 10))
Contributor: AbhinavJain13 | Project: tensorflow | Lines: 28 | Source: function_test.py


Example 14: add_meta_graph

  def add_meta_graph(self,
                     tags,
                     signature_def_map=None,
                     assets_collection=None,
                     legacy_init_op=None,
                     clear_devices=False,
                     main_op=None):
    """Adds the current meta graph to the SavedModel.

    Creates a Saver in the current scope and uses the Saver to export the meta
    graph def. Invoking this API requires the `add_meta_graph_and_variables()`
    API to have been invoked before.

    Args:
      tags: The set of tags to annotate the meta graph def with.
      signature_def_map: The map of signature defs to be added to the meta graph
          def.
      assets_collection: Assets collection to be saved with SavedModel. Note
          that this collection should be a subset of the assets saved as part of
          the first meta graph in the SavedModel.
      legacy_init_op: Legacy support for op or group of ops to execute after the
          restore op upon a load.
      clear_devices: Set to true if the device info on the default graph should
          be cleared.
      main_op: Op or group of ops to execute when the graph is loaded.

    Raises:
      AssertionError: If the variables for the SavedModel have not been saved
          yet.
    """
    if not self._has_saved_variables:
      raise AssertionError(
          "Graph state including variables and assets has not been saved yet. "
          "Please invoke `add_meta_graph_and_variables()` first.")

    # Validate the signature def map to ensure all included TensorInfos are
    # properly populated.
    self._validate_signature_def_map(signature_def_map)

    # Save asset files and write them to disk, if any.
    self._save_and_write_assets(assets_collection)

    if main_op is None:
      # Add legacy init op to the SavedModel.
      self._maybe_add_legacy_init_op(legacy_init_op)
    else:
      self._add_main_op(main_op)

    # Initialize a saver to generate a sharded output for all variables in the
    # current scope.
    saver = tf_saver.Saver(
        variables.global_variables(),
        sharded=True,
        write_version=saver_pb2.SaverDef.V2,
        allow_empty=True)

    meta_graph_def = saver.export_meta_graph(clear_devices=clear_devices)

    # Tag the meta graph def and add it to the SavedModel.
    self._tag_and_add_meta_graph(meta_graph_def, tags, signature_def_map)
Contributor: Immexxx | Project: tensorflow | Lines: 60 | Source: builder_impl.py


Example 15: testBasicLSTMCell

 def testBasicLSTMCell(self):
   for dtype in [dtypes.float16, dtypes.float32]:
     np_dtype = dtype.as_numpy_dtype
     with self.test_session(graph=ops.Graph()) as sess:
       with variable_scope.variable_scope(
           "root", initializer=init_ops.constant_initializer(0.5)):
         x = array_ops.zeros([1, 2], dtype=dtype)
         m = array_ops.zeros([1, 8], dtype=dtype)
         cell = rnn_cell_impl.MultiRNNCell(
             [
                 rnn_cell_impl.BasicLSTMCell(2, state_is_tuple=False)
                 for _ in range(2)
             ],
             state_is_tuple=False)
         self.assertEqual(cell.dtype, None)
         g, out_m = cell(x, m)
         # Layer infers the input type.
         self.assertEqual(cell.dtype, dtype.name)
         expected_variable_names = [
             "root/multi_rnn_cell/cell_0/basic_lstm_cell/%s:0" %
             rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
             "root/multi_rnn_cell/cell_0/basic_lstm_cell/%s:0" %
             rnn_cell_impl._BIAS_VARIABLE_NAME,
             "root/multi_rnn_cell/cell_1/basic_lstm_cell/%s:0" %
             rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
             "root/multi_rnn_cell/cell_1/basic_lstm_cell/%s:0" %
             rnn_cell_impl._BIAS_VARIABLE_NAME
         ]
         self.assertEqual(expected_variable_names,
                          [v.name for v in cell.trainable_variables])
         self.assertFalse(cell.non_trainable_variables)
         sess.run([variables_lib.global_variables_initializer()])
         res = sess.run([g, out_m], {
             x.name: np.array([[1., 1.]]),
             m.name: 0.1 * np.ones([1, 8])
         })
         self.assertEqual(len(res), 2)
         variables = variables_lib.global_variables()
         self.assertEqual(expected_variable_names, [v.name for v in variables])
          # The numbers in the results were not hand-calculated; this is
          # just a smoke test.
         self.assertAllClose(res[0], np.array(
             [[0.240, 0.240]], dtype=np_dtype), 1e-2)
         expected_mem = np.array(
             [[0.689, 0.689, 0.448, 0.448, 0.398, 0.398, 0.240, 0.240]],
             dtype=np_dtype)
         self.assertAllClose(res[1], expected_mem, 1e-2)
       with variable_scope.variable_scope(
           "other", initializer=init_ops.constant_initializer(0.5)):
         # Test BasicLSTMCell with input_size != num_units.
         x = array_ops.zeros([1, 3], dtype=dtype)
         m = array_ops.zeros([1, 4], dtype=dtype)
         g, out_m = rnn_cell_impl.BasicLSTMCell(2, state_is_tuple=False)(x, m)
         sess.run([variables_lib.global_variables_initializer()])
         res = sess.run(
             [g, out_m], {
                 x.name: np.array([[1., 1., 1.]], dtype=np_dtype),
                 m.name: 0.1 * np.ones([1, 4], dtype=np_dtype)
             })
         self.assertEqual(len(res), 2)
Contributor: ChengYuXiang | Project: tensorflow | Lines: 60 | Source: core_rnn_cell_test.py


Example 16: add_variable

  def add_variable(self, name, shape, dtype=None,
                   initializer=None, regularizer=None, trainable=True):
    """Adds a new variable to the layer, or gets an existing one; returns it.

    Arguments:
      name: variable name.
      shape: variable shape.
      dtype: The type of the variable. Defaults to `self.dtype`.
      initializer: initializer instance (callable).
      regularizer: regularizer instance (callable).
      trainable: whether the variable should be part of the layer's
        "trainable_variables" (e.g. variables, biases)
        or "non_trainable_variables" (e.g. BatchNorm mean, stddev).

    Returns:
      The created variable.
    """
    if dtype is None:
      dtype = self.dtype
    existing_variables = set(tf_variables.global_variables())

    self._set_scope(None)

    with vs.variable_scope(self._scope,
                           reuse=self.built or self._reuse) as scope:
      with ops.name_scope(scope.original_name_scope):
        variable = vs.get_variable(name,
                                   shape=shape,
                                   initializer=initializer,
                                   dtype=dtypes.as_dtype(dtype),
                                   trainable=trainable and self.trainable)
        if variable in existing_variables:
          return variable
        if regularizer:
          # To match the behavior of tf.get_variable(), we only
          # apply regularization if the variable is newly created.
          if isinstance(variable, tf_variables.PartitionedVariable):
            for v in variable:
              with ops.colocate_with(v.op):
                with ops.name_scope(name + '/Regularizer'):
                  regularization = regularizer(v)
              if regularization is not None:
                self.add_loss(regularization)
                _add_elements_to_collection(
                    regularization, ops.GraphKeys.REGULARIZATION_LOSSES)
          else:
            with ops.colocate_with(variable.op):
              with ops.name_scope(name + '/Regularizer'):
                regularization = regularizer(variable)
            if regularization is not None:
              self.add_loss(regularization)
              _add_elements_to_collection(
                  regularization, ops.GraphKeys.REGULARIZATION_LOSSES)
    if trainable:
      self._trainable_weights.append(variable)
    else:
      self._non_trainable_weights.append(variable)
    return variable
Contributor: AlbertXiebnu | Project: tensorflow | Lines: 58 | Source: base.py


Example 17: _add_variable

    def _add_variable(
        self,
        name,
        shape,
        dtype=None,
        initializer=None,
        regularizer=None,
        trainable=True,
        variable_getter=vs.get_variable,
    ):
        """Adds a new variable to the layer.

    Arguments:
      name: variable name.
      shape: variable shape.
      dtype: The type of the variable. Defaults to `self.dtype`.
      initializer: initializer instance (callable).
      regularizer: regularizer instance (callable).
      trainable: whether the variable should be part of the layer's
        "trainable_variables" (e.g. variables, biases)
        or "non_trainable_variables" (e.g. BatchNorm mean, stddev).
      variable_getter: The getter to use for TensorFlow variables.

    Returns:
      The created variable.
    """
        if dtype is None:
            dtype = self.dtype
        existing_variables = set(tf_variables.global_variables())
        variable = variable_getter(
            name, shape=shape, initializer=initializer, dtype=dtype, trainable=trainable and self.trainable
        )
        # TODO(sguada) fix name = variable.op.name
        if regularizer:
            if not self._reuse and variable not in existing_variables:
                # To match the behavior of tf.get_variable(), we only
                # apply regularization if the variable is newly created.
                if isinstance(variable, tf_variables.PartitionedVariable):
                    for v in variable:
                        with ops.colocate_with(v.op):
                            with ops.name_scope(name + "/Regularizer"):
                                regularization = regularizer(v)
                        if regularization is not None:
                            self._losses.append(regularization)
                            _add_elements_to_collection(regularization, ops.GraphKeys.REGULARIZATION_LOSSES)
                else:
                    with ops.colocate_with(variable.op):
                        with ops.name_scope(name + "/Regularizer"):
                            regularization = regularizer(variable)
                    if regularization is not None:
                        self._losses.append(regularization)
                        _add_elements_to_collection(regularization, ops.GraphKeys.REGULARIZATION_LOSSES)
        if trainable:
            self._trainable_variables.append(variable)
        else:
            self._non_trainable_variables.append(variable)
        return variable
Contributor: kdavis-mozilla | Project: tensorflow | Lines: 57 | Source: base.py


Example 18: DISABLED_testShared

 def DISABLED_testShared(self):
   with self.test_session():
     with specs.ops:
       # pylint: disable=undefined-variable
       f = Shared(Fr(100))
       g = f | f | f | f
     inputs = constant_op.constant(_rand(10, 100))
     _ = g.funcall(inputs)
     self.assertEqual(len(variables.global_variables()), 2)
Contributor: 1000sprites | Project: tensorflow | Lines: 9 | Source: specs_test.py


Example 19: _get_variable_for

def _get_variable_for(v):
  """Returns the ResourceVariable responsible for v, or v if not necessary."""
  if v.op.type == "ResourceGather":
    for var in variables.global_variables() + variables.local_variables():
      if (isinstance(var, resource_variable_ops.ResourceVariable)
          and var.handle is v.op.inputs[0]):
        return var
    raise ValueError("Got embedding lookup %s but"
                     " could not locate source variable." % (str(v)))
  return v
Contributor: Jackhuang945 | Project: tensorflow | Lines: 10 | Source: optimizer.py


Example 20: _get_variable

 def _get_variable(var_name, part_name, ema):
   """Returns variable of it's moving average by name."""
   matches = [
       v for v in variables.global_variables()
       if ((var_name in v.op.name)
           and (part_name in v.op.name)
           and (('ExponentialMovingAverage' in v.op.name) == ema))
   ]
   self.assertEqual(len(matches), 1)
   return matches[0]
Contributor: Ajaycs99 | Project: tensorflow | Lines: 10 | Source: moving_average_optimizer_test.py



Note: The tensorflow.python.ops.variables.global_variables examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors. Consult each project's license before redistributing or reusing the code, and do not reproduce this compilation without permission.

