• 设为首页
  • 点击收藏
  • 手机版
    手机扫一扫访问
    迪恩网络手机版
  • 关注官方公众号
    微信扫一扫关注
    公众号

Python gen_state_ops._temporary_variable函数代码示例

原作者: [db:作者] 来自: [db:来源] 收藏 邀请

本文整理汇总了Python中tensorflow.python.ops.gen_state_ops._temporary_variable函数的典型用法代码示例。如果您正苦于以下问题:Python _temporary_variable函数的具体用法?Python _temporary_variable怎么用?Python _temporary_variable使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。



在下文中一共展示了_temporary_variable函数的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。

示例1: testTwoTemporaryVariablesNoLeaks

 def testTwoTemporaryVariablesNoLeaks(self):
   """Two temporary variables with distinct names can coexist and be read."""
   with self.test_session(use_gpu=True):
     first = gen_state_ops._temporary_variable(
         [1, 2], dtypes.float32, var_name="var1")
     second = gen_state_ops._temporary_variable(
         [1, 2], dtypes.float32, var_name="var2")
     # Evaluating the sum forces both variables to be created and consumed.
     total = first + second
     total.eval()
开发者ID:kdavis-mozilla,项目名称:tensorflow,代码行数:8,代码来源:variable_ops_test.py


示例2: testDuplicateTemporaryVariable

 def testDuplicateTemporaryVariable(self):
     """Creating two temporary variables under the same var_name must fail."""
     with self.test_session(use_gpu=True):
         first = gen_state_ops._temporary_variable(
             [1, 2], tf.float32, var_name="dup")
         first = tf.assign(first, [[1.0, 2.0]])
         second = gen_state_ops._temporary_variable(
             [1, 2], tf.float32, var_name="dup")
         second = tf.assign(second, [[3.0, 4.0]])
         total = first + second
         # The name collision surfaces only when the graph actually runs.
         with self.assertRaises(errors.AlreadyExistsError):
             total.eval()
开发者ID:adeelzaman,项目名称:tensorflow,代码行数:9,代码来源:variable_ops_test.py


示例3: testTemporaryVariable

 def testTemporaryVariable(self):
     """assign then assign_add on a temporary variable; destroy returns the value."""
     with self.test_session(use_gpu=True):
         tmp = gen_state_ops._temporary_variable(
             [1, 2], tf.float32, var_name="foo")
         tmp = tf.assign(tmp, [[4.0, 5.0]])
         tmp = tf.assign_add(tmp, [[6.0, 7.0]])
         # Destroying the temporary yields its final contents as a tensor.
         result = gen_state_ops._destroy_temporary_variable(tmp, var_name="foo")
         self.assertAllClose([[10.0, 12.0]], result.eval())
开发者ID:adeelzaman,项目名称:tensorflow,代码行数:7,代码来源:variable_ops_test.py


示例4: testDestroyTemporaryVariableTwice

 def testDestroyTemporaryVariableTwice(self):
   """Destroying the same temporary variable twice raises NotFoundError."""
   with self.test_session(use_gpu=True):
     tmp = gen_state_ops._temporary_variable([1, 2], dtypes.float32)
     destroyed_once = gen_state_ops._destroy_temporary_variable(tmp, var_name="dup")
     destroyed_twice = gen_state_ops._destroy_temporary_variable(tmp, var_name="dup")
     # The second destroy runs after the variable is already gone.
     combined = destroyed_once + destroyed_twice
     with self.assertRaises(errors.NotFoundError):
       combined.eval()
开发者ID:kdavis-mozilla,项目名称:tensorflow,代码行数:8,代码来源:variable_ops_test.py


示例5: testTemporaryVariableNoLeak

 def testTemporaryVariableNoLeak(self):
   """A temporary variable that is read but never destroyed must not leak."""
   with self.test_session(use_gpu=True):
     tmp = gen_state_ops._temporary_variable(
         [1, 2], tf.float32, var_name="bar")
     result = tf.identity(tmp)
     result.eval()
开发者ID:debaratidas1994,项目名称:tensorflow,代码行数:8,代码来源:variable_ops_test.py


示例6: _AccumulateNTemplate

 def _AccumulateNTemplate(self, inputs, init, shape, validate_shape):
   """Sum `inputs` into a temporary variable and return the accumulated value."""
   accumulator = gen_state_ops._temporary_variable(
       shape=shape, dtype=inputs[0].dtype.base_dtype)
   ref = tf.assign(accumulator, init, validate_shape=validate_shape)
   update_ops = []
   for tensor in inputs:
     update_ops.append(tf.assign_add(ref, tensor, use_locking=True).op)
   # Only destroy (and read out) the accumulator once every add has run.
   with tf.control_dependencies(update_ops):
     return gen_state_ops._destroy_temporary_variable(
         ref, var_name=accumulator.op.name)
开发者ID:821760408-sp,项目名称:tensorflow,代码行数:9,代码来源:accumulate_n_benchmark.py


示例7: _thin_stack_lookup_gradient

def _thin_stack_lookup_gradient(op, grad_stack1, grad_stack2, grad_buf_top, _):
    """Gradient for a thin-stack lookup op.

    Scatters the incoming gradients into zero-initialized temporary variables
    shaped like the forward op's stack and buffer inputs, then destroys the
    temporaries to read out the accumulated gradients.

    Args:
      op: The forward op. Its inputs are (stack, buffer, _, _, buffer_cursors,
        transitions); outputs[3] holds the stored stack2 lookup pointers.
      grad_stack1: Gradient w.r.t. the first stack lookup result.
      grad_stack2: Gradient w.r.t. the second stack lookup result.
      grad_buf_top: Gradient w.r.t. the buffer-top lookup result.
      _: Unused gradient for the remaining output.

    Returns:
      A 6-tuple aligned with op.inputs:
      (grad_stack, grad_buffer, None, None, None, None).
    """
    stack, buffer, _, _, buffer_cursors, transitions = op.inputs

    stack2_ptrs = op.outputs[3]
    # Timestep at which the forward lookup ran; selects the stack block below.
    t = op.get_attr("timestep")

    # NOTE(review): assumes rows are laid out as blocks of batch_size
    # (stack row = t * batch_size + batch_index) -- confirm against the
    # forward op. Also note `/` yields a float under Python 3; presumably
    # this code targeted Python 2 integer division.
    batch_size = buffer_cursors.get_shape().as_list()[0]
    num_tokens = buffer.get_shape().as_list()[0] / batch_size
    batch_range = math_ops.range(batch_size)
    batch_range_i = tf.to_float(batch_range)

    # Unique per-call names so repeated gradient construction (one per
    # timestep/instance) never collides on temporary-variable names.
    grad_stack_name = "grad_stack_%i_%s" % (t, str(uuid.uuid4())[:15])
    grad_buffer_name = "grad_buffer_%i_%s" % (t, str(uuid.uuid4())[:15])
    grad_stack = gen_state_ops._temporary_variable(stack.get_shape().as_list(), tf.float32, grad_stack_name)
    grad_buffer = gen_state_ops._temporary_variable(buffer.get_shape().as_list(), tf.float32, grad_buffer_name)
    # Zero-initialize both accumulators before any scatter-add below.
    grad_stack = tf.assign(grad_stack, tf.zeros_like(grad_stack))
    grad_buffer = tf.assign(grad_buffer, tf.zeros_like(grad_buffer))

    # NOTE(review): `updates` is never used in this function.
    updates = []

    # Write grad_stack1 into block (t - 1)
    if t >= 1:
      in_cursors = (t - 1) * batch_size + batch_range
      grad_stack = tf.scatter_add(grad_stack, in_cursors, grad_stack1)

    # Write grad_stack2 using stored lookup pointers
    grad_stack = floaty_scatter_add(grad_stack, stack2_ptrs * batch_size + batch_range_i, grad_stack2)

    # Use buffer_cursors to scatter grads into buffer.
    # Clamp to the last valid row; `(float)(x)` is just Python's float(x).
    buffer_ptrs = tf.minimum((float) (num_tokens * batch_size) - 1.0,
                              buffer_cursors * batch_size + batch_range_i)
    grad_buffer = floaty_scatter_add(grad_buffer, buffer_ptrs, grad_buf_top)

    # Destroy only after all scatter updates have executed; destroying yields
    # the accumulated gradients as ordinary tensors.
    with tf.control_dependencies([grad_stack, grad_buffer]):
      grad_stack = gen_state_ops._destroy_temporary_variable(grad_stack, grad_stack_name)
      grad_buffer = gen_state_ops._destroy_temporary_variable(grad_buffer, grad_buffer_name)

      with tf.control_dependencies([grad_stack, grad_buffer]):
        return grad_stack, grad_buffer, None, None, None, None
开发者ID:hans,项目名称:thinstack-rl,代码行数:39,代码来源:core.py


示例8: accumulate_n

def accumulate_n(inputs, shape=None, tensor_dtype=None, name=None):
  """Returns the element-wise sum of a list of tensors.

  Optionally, pass `shape` and `tensor_dtype` for shape and type checking,
  otherwise, these are inferred.

  For example:

  ```python
  # tensor 'a' is [[1, 2], [3, 4]]
  # tensor `b` is [[5, 0], [0, 6]]
  tf.accumulate_n([a, b, a]) ==> [[7, 4], [6, 14]]

  # Explicitly pass shape and type
  tf.accumulate_n([a, b, a], shape=[2, 2], tensor_dtype=tf.int32)
    ==> [[7, 4], [6, 14]]
  ```

  Args:
    inputs: A list of `Tensor` objects, each with same shape and type.
    shape: Shape of elements of `inputs`.
    tensor_dtype: The type of `inputs`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of same shape and type as the elements of `inputs`.

  Raises:
    ValueError: If `inputs` don't all have same shape and dtype or the shape
    cannot be inferred.
  """
  # The same message is raised for every input-validation failure.
  bad_inputs_msg = ("inputs must be a list of at least one Tensor with the "
                    "same dtype and shape")
  if tensor_dtype is None:
    # Validate before and after conversion: the list must be non-empty,
    # convertible entirely to Tensors, and dtype-homogeneous.
    if not inputs or not isinstance(inputs, (list, tuple)):
      raise ValueError(bad_inputs_msg)
    inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)
    if any(not isinstance(x, ops.Tensor) for x in inputs):
      raise ValueError(bad_inputs_msg)
    if any(x.dtype != inputs[0].dtype for x in inputs):
      raise ValueError(bad_inputs_msg)
    tensor_dtype = inputs[0].dtype
  if shape is None:
    # Infer a shape by merging whatever static shape each input carries.
    shape = tensor_shape.unknown_shape()
    for input_tensor in inputs:
      if isinstance(input_tensor, ops.Tensor):
        shape = shape.merge_with(input_tensor.get_shape())
  else:
    shape = tensor_shape.as_shape(shape)
  if not shape.is_fully_defined():
    # TODO(pbar): Make a version of assign_add that accepts an uninitialized
    # lvalue, and takes its shape from that? This would allow accumulate_n to
    # work in all situations that add_n currently works.
    raise ValueError("Cannot infer the shape of the accumulator for "
                     "accumulate_n. Pass the shape argument, or set the shape "
                     "of at least one of the inputs.")
  with ops.op_scope(inputs, name, "AccumulateN") as name:
    # Accumulate into a zero-initialized temporary variable, then destroy it
    # to read out the sum once every assign_add has run.
    var = gen_state_ops._temporary_variable(shape=shape, dtype=tensor_dtype)
    var_name = var.op.name
    var = state_ops.assign(var, array_ops.zeros_like(inputs[0]))
    update_ops = [state_ops.assign_add(var, input_tensor, use_locking=True)
                  for input_tensor in inputs]
    with ops.control_dependencies(update_ops):
      return gen_state_ops._destroy_temporary_variable(var,
                                                       var_name=var_name,
                                                       name=name)
开发者ID:13331151,项目名称:tensorflow,代码行数:69,代码来源:math_ops.py


示例9: testDestroyNonexistentTemporaryVariable

 def testDestroyNonexistentTemporaryVariable(self):
   """Destroying under a var_name that was never created raises NotFoundError."""
   with self.test_session(use_gpu=True):
     tmp = gen_state_ops._temporary_variable([1, 2], dtypes.float32)
     # "bad" does not match the auto-generated name of `tmp`.
     result = gen_state_ops._destroy_temporary_variable(tmp, var_name="bad")
     with self.assertRaises(errors.NotFoundError):
       result.eval()
开发者ID:kdavis-mozilla,项目名称:tensorflow,代码行数:6,代码来源:variable_ops_test.py



注:本文中的tensorflow.python.ops.gen_state_ops._temporary_variable函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。


鲜花

握手

雷人

路过

鸡蛋
该文章已有0人参与评论

请发表评论

全部评论

专题导读
热门推荐
阅读排行榜

扫描微信二维码

查看手机版网站

随时了解更新最新资讯

139-2527-9053

在线客服(服务时间 9:00~18:00)

在线QQ客服
地址:深圳市南山区西丽大学城创智工业园
电邮:jeky_zhao#qq.com
移动电话:139-2527-9053

Powered by 互联科技 X3.4© 2001-2023 极客世界.|Sitemap