Python control_flow_ops.while_loop Function Code Examples


This article collects typical usage examples of the Python function tensorflow.python.ops.control_flow_ops.while_loop. If you are wondering how while_loop is used in practice, or are looking for concrete while_loop examples, the hand-picked code samples below may help.



The following 20 code examples of while_loop are sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
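Before the collected examples, here is a minimal sketch of the basic calling convention (an illustration written for this page, not taken from any of the projects below): while_loop takes a condition callable, a body callable, and the initial loop variables, and returns the final loop variables as graph tensors.

# Minimal while_loop sketch (TF 1.x graph mode); evaluate the results in a Session.
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops

i0 = constant_op.constant(0)
total0 = constant_op.constant(0)

cond = lambda i, total: math_ops.less(i, 5)    # keep looping while i < 5
body = lambda i, total: (i + 1, total + i)     # increment i, accumulate the sum

i_final, total_final = control_flow_ops.while_loop(cond, body, [i0, total0])
# After evaluation, i_final == 5 and total_final == 0 + 1 + 2 + 3 + 4 == 10.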

Example 1: testGradSerialTwoLoops

  def testGradSerialTwoLoops(self):
    with self.test_session(use_gpu=True):
      num_steps = 100
      acc = tensor_array_ops.TensorArray(
          dtype=dtypes.float32,
          size=num_steps,
          clear_after_read=False,
          element_shape=tensor_shape.scalar())
      i = constant_op.constant(0, name="i")
      x = constant_op.constant(2.0, name="x")

      c = lambda i, acc: i < 5

      def b(i, acc):
        x1 = control_flow_ops.cond(
            math_ops.equal(i, 0), lambda: x,
            lambda: math_ops.multiply(acc.read(i - 1), 2.0))
        return i + 1, acc.write(i, x1)

      i1, acc1 = control_flow_ops.while_loop(c, b, [i, acc])

      z = constant_op.constant(0.0)

      def fn(i, acc):
        return i + 1, acc.write(i, z)

      _, acc2 = control_flow_ops.while_loop(lambda i, acc: i < num_steps, fn,
                                            [i1, acc1])

      r = acc2.stack()
      grad = gradients_impl.gradients(r, [x])[0]
      self.assertAllClose(31.0, grad.eval())
Developer: jzuern | Project: tensorflow | Lines: 32 | Source: tensor_array_ops_test.py
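The expected gradient of 31.0 follows from what the first loop writes into the TensorArray: x, 2x, 4x, 8x and 16x (the second loop only writes the constant z, which does not depend on x), so the stacked sum is 31x. A quick arithmetic check, written here as a hypothetical standalone snippet rather than part of the original test:

# Hypothetical check: the first loop writes x * 2**k for k in 0..4,
# so d(sum)/dx = 1 + 2 + 4 + 8 + 16 = 31.
print(sum(2 ** k for k in range(5)))  # 31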


Example 2: _testStackWhileSwap

  def _testStackWhileSwap(self, use_gpu):
    with self.test_session(use_gpu=use_gpu):
      n = constant_op.constant(0)
      h = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")

      def c(x):
        return math_ops.less(x, 10)

      def b(x):
        with ops.control_dependencies([x]):
          a = constant_op.constant(np.ones(2000), dtype=dtypes.float32)
          v = gen_data_flow_ops._stack_push(h, a, swap_memory=True)
        with ops.control_dependencies([v]):
          return math_ops.add(x, 1)

      r = control_flow_ops.while_loop(c, b, [n])

      v = constant_op.constant(np.zeros(2000), dtype=dtypes.float32)

      def c1(x, y):
        return math_ops.greater(x, 0)

      def b1(x, y):
        nx = math_ops.subtract(x, 1)
        ny = y + gen_data_flow_ops._stack_pop(h, dtypes.float32)
        return [nx, ny]

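      # The fourth positional argument below is shape_invariants: the values
      # popped from the stack have unknown static shape, so y needs the
      # relaxed tensor_shape.unknown_shape() invariant.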
      rx, ry = control_flow_ops.while_loop(
          c1, b1, [r, v], [r.get_shape(), tensor_shape.unknown_shape()])
      self.assertAllClose(np.ones(2000) * 10.0, ry.eval())
Developer: AlbertXiebnu | Project: tensorflow | Lines: 30 | Source: stack_ops_test.py


Example 3: testWhileContext

  def testWhileContext(self):
    with self.test_session() as sess:
      i = constant_op.constant(0)
      c = lambda i: math_ops.less(i, 10)
      b = lambda i: math_ops.add(i, 1)
      control_flow_ops.while_loop(c, b, [i])
      for op in sess.graph.get_operations():
        c = op._get_control_flow_context()
        if c:
          compare.ProtoEq(
              c.to_proto(),
              control_flow_ops.WhileContext.from_proto(c.to_proto()).to_proto())
Developer: tensorflow | Project: tensorflow | Lines: 10 | Source: control_flow_ops_test.py


Example 4: testControlFlowInitialization

  def testControlFlowInitialization(self):
    """Expects an error if an initializer is in a control-flow scope."""
    def cond(i, _):
      return i < 10

    def body(i, _):
      zero = array_ops.zeros([], dtype=dtypes.int32)
      v = variables.Variable(initial_value=zero)
      return (i + 1, v.read_value())

    with self.assertRaisesRegexp(ValueError, "inside a control-flow"):
      control_flow_ops.while_loop(cond, body, [0, 0])
Developer: adit-chandra | Project: tensorflow | Lines: 12 | Source: variables_test.py


Example 5: testGradientInsideLoop

  def testGradientInsideLoop(self):
    with ops.Graph().as_default():
      v = resource_variable_ops.ResourceVariable(1.0)

      def body(_):
        _ = v + 1.0  # This reads the variable inside the loop context
        with backprop.GradientTape() as t:
          result = v * 2
        self.assertTrue(t.gradient(result, v) is not None)
        return 1.0

      control_flow_ops.while_loop(lambda i: False, body, [1.0])
Developer: Wajih-O | Project: tensorflow | Lines: 12 | Source: backprop_test.py


Example 6: _testWhileContextHelper

 def _testWhileContextHelper(self, maximum_iterations=None):
   with self.test_session() as sess:
     i = constant_op.constant(0)
     c = lambda i: math_ops.less(i, 10)
     b = lambda i: math_ops.add(i, 1)
     control_flow_ops.while_loop(
         c, b, [i], maximum_iterations=maximum_iterations)
     for op in sess.graph.get_operations():
       context = op._get_control_flow_context()
       if context:
         self.assertProtoEquals(context.to_proto(),
                                control_flow_ops.WhileContext.from_proto(
                                    context.to_proto()).to_proto())
Developer: AbhinavJain13 | Project: tensorflow | Lines: 13 | Source: control_flow_ops_test.py


Example 7: body

    def body(it, cost):
      embedding = embedding_ops.embedding_lookup(embedding_matrix, [0])
      cost = control_flow_ops.cond(
          math_ops.equal(it, 3), lambda: math_ops.square(cost),
          (lambda: cost + math_ops.reduce_sum(embedding)))
      return it + 1, cost

    # Note: `cond` and `embedding_matrix` are defined in the enclosing test
    # method and are not shown in this snippet.
    _, cost = control_flow_ops.while_loop(
        cond, body, [constant_op.constant(0),
                     constant_op.constant(0.0)])

    dynamic_grads = gradients_impl.gradients(cost, [embedding_matrix])[0]
    dynamic_grads = math_ops.segment_sum(dynamic_grads.values,
                                         dynamic_grads.indices)

    embedding = embedding_ops.embedding_lookup(embedding_matrix, [0])
    static = math_ops.square(
        math_ops.reduce_sum(embedding) + math_ops.reduce_sum(embedding) +
        math_ops.reduce_sum(embedding)) + math_ops.reduce_sum(embedding)
    static_grads = gradients_impl.gradients(static, [embedding_matrix])[0]
    static_grads = math_ops.segment_sum(static_grads.values,
                                        static_grads.indices)

    with self.cached_session():
      self.evaluate(variables.global_variables_initializer())
      self.assertAllEqual(*self.evaluate([static_grads, dynamic_grads]))
Developer: Wajih-O | Project: tensorflow | Lines: 26 | Source: control_flow_ops_test.py


Example 8: _repeat_range

def _repeat_range(counts, name=None):
  """Repeat integers given by range(len(counts)) each the given number of times.

  Example behavior:
  [0, 1, 2, 3] -> [1, 2, 2, 3, 3, 3]

  Args:
    counts: 1D tensor with dtype=int32.
    name: optional name for operation.

  Returns:
    1D tensor with dtype=int32 and dynamic length giving the repeated integers.
  """
  with ops.name_scope(name, 'repeat_range', [counts]) as scope:
    counts = ops.convert_to_tensor(counts, name='counts')

    def cond(unused_output, i):
      return i < size

    def body(output, i):
      value = array_ops.fill(counts[i:i+1], i)
      return (output.write(i, value), i + 1)

    size = array_ops.shape(counts)[0]
    init_output_array = tensor_array_ops.TensorArray(
        dtype=dtypes.int32, size=size, infer_shape=False)
    output_array, num_writes = control_flow_ops.while_loop(
        cond, body, loop_vars=[init_output_array, 0])

    return control_flow_ops.cond(
        num_writes > 0,
        output_array.concat,
        lambda: array_ops.zeros(shape=[0], dtype=dtypes.int32),
        name=scope)
Developer: AlbertXiebnu | Project: tensorflow | Lines: 34 | Source: resample.py
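A hypothetical usage sketch for _repeat_range (the session setup and the [0, 1, 2, 3] input are assumptions made for illustration, not taken from resample.py):

# Hypothetical usage of _repeat_range (TF 1.x graph mode).
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes

counts = constant_op.constant([0, 1, 2, 3], dtype=dtypes.int32)
repeated = _repeat_range(counts)

with session.Session() as sess:
  print(sess.run(repeated))  # expected: [1 2 2 3 3 3]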


Example 9: createAndRunGraphWithWhileLoop

  def createAndRunGraphWithWhileLoop(self):
    """Create and run a TensorFlow Graph with a while loop to generate dumps."""

    self.dump_root = self.get_temp_dir()
    self.curr_file_path = os.path.abspath(
        tf_inspect.getfile(tf_inspect.currentframe()))

    # Run a simple TF graph to generate some debug dumps that can be used in
    # source annotation.
    with session.Session() as sess:
      loop_body = lambda i: math_ops.add(i, 2)
      self.traceback_first_line = line_number_above()

      loop_cond = lambda i: math_ops.less(i, 16)

      i = constant_op.constant(10, name="i")
      loop = control_flow_ops.while_loop(loop_cond, loop_body, [i])

      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options, sess.graph, debug_urls=["file://%s" % self.dump_root])
      run_metadata = config_pb2.RunMetadata()
      sess.run(loop, options=run_options, run_metadata=run_metadata)

      self.dump = debug_data.DebugDumpDir(
          self.dump_root, partition_graphs=run_metadata.partition_graphs)
      self.dump.set_python_graph(sess.graph)
Developer: 1000sprites | Project: tensorflow | Lines: 27 | Source: source_utils_test.py


Example 10: call

    def call(self, inputs):
        batch_size = tf.shape(inputs)[0]
        self.bounder_state_h0 = tf.zeros([batch_size, self.units])

        input_x = tf.transpose(inputs, [2, 3, 0, 1])
        input_x = tf.reshape(input_x, [-1, self.channel])
        input_x = tf.split(axis=0, num_or_size_splits=self.text1_maxlen * self.text2_maxlen, value=input_x)
        inputs_ta = tf.TensorArray(dtype=tf.float32, size=self.text1_maxlen * self.text2_maxlen, name='input_ta')
        states_ta = tf.TensorArray(dtype=tf.float32, size=(self.text1_maxlen + 1) * (self.text2_maxlen + 1),
                                   name='state_ta', clear_after_read=False)

        for i in range(self.text2_maxlen + 1):
            states_ta = states_ta.write(i, self.bounder_state_h0)
        for i in range(self.text1_maxlen):
            states_ta = states_ta.write((i + 1) * (self.text2_maxlen + 1), self.bounder_state_h0)
        inputs_ta = inputs_ta.unstack(input_x)
        _, _, _, hij, _ = control_flow_ops.while_loop(
            cond=lambda _0, _1, i, _3, _4: i < self.recurrent_step,
            body=self.calculate_recurrent_unit,
            loop_vars=(
                inputs_ta, states_ta, tf.Variable(0, dtype=tf.int32), self.bounder_state_h0, self.bounder_state_h0),
            parallel_iterations=1,
            swap_memory=True
        )
        return hij
Developer: hhh920406 | Project: MatchZoo | Lines: 25 | Source: SpatialGRU.py


Example 11: _forward

 def _forward(self, x):
   event_size = array_ops.shape(x)[-1]
   y0 = array_ops.zeros_like(x, name="y0")
   # call the template once to ensure creation
   _ = self._shift_and_log_scale_fn(y0)
   def _loop_body(index, y0):
     """While-loop body for autoregression calculation."""
     # Set caching device to avoid re-getting the tf.Variable for every while
     # loop iteration.
     with variable_scope_lib.variable_scope(
         variable_scope_lib.get_variable_scope()) as vs:
       if vs.caching_device is None:
         vs.set_caching_device(lambda op: op.device)
       shift, log_scale = self._shift_and_log_scale_fn(y0)
     y = x
     if log_scale is not None:
       y *= math_ops.exp(log_scale)
     if shift is not None:
       y += shift
     return index + 1, y
   _, y = control_flow_ops.while_loop(
       cond=lambda index, _: index < event_size,
       body=_loop_body,
       loop_vars=[0, y0])
   return y
Developer: AbhinavJain13 | Project: tensorflow | Lines: 25 | Source: masked_autoregressive.py


Example 12: _timeit

 def _timeit(iterations, _):
   (_, final) = control_flow_ops.while_loop(
       lambda t, _: t < iterations,
       body, (t0, v0),
       parallel_iterations=1,
       back_prop=False)
   return [final]
Developer: AliMiraftab | Project: tensorflow | Lines: 7 | Source: sparse_tensor_dense_matmul_op_test.py


Example 13: loop_fn

  def loop_fn(i):
    sequence_length_i = array_ops.gather(sequence_length, i)

    def body_fn(t, state, ta):
      inputs_t = array_ops.expand_dims(
          array_ops.gather(inputs_ta.read(t), i), 0)
      output, new_state = cell(inputs_t, state)
      output = array_ops.reshape(output, [-1])
      # TODO(agarwal): one optimization that dynamic_rnn uses is to avoid the
      # array_ops.where when t < min(sequence_length). Doing that requires
      # supporting tf.cond pfor conversion.
      done = t >= sequence_length_i
      output = array_ops.where(done, zeros, output)
      ta = ta.write(t, output)
      new_state = [array_ops.where(done, s, ns) for s, ns in
                   zip(nest.flatten(state), nest.flatten(new_state))]
      new_state = nest.pack_sequence_as(state, new_state)
      return t + 1, new_state, ta

    def condition_fn(t, _, unused):
      del unused
      return t < max_steps

    initial_state = cell.zero_state(1, dtypes.float32)
    _, state, ta = control_flow_ops.while_loop(condition_fn, body_fn, [
        0, initial_state,
        tensor_array_ops.TensorArray(dtypes.float32, max_steps)
    ])

    new_state = [array_ops.reshape(x, [-1]) for x in nest.flatten(state)]
    new_state = nest.pack_sequence_as(initial_state, new_state)
    return ta.stack(), new_state
Developer: aritratony | Project: tensorflow | Lines: 32 | Source: control_flow_ops_test.py


Example 14: _loop_vars_intertwined

def _loop_vars_intertwined(x0, y0, functor_x, functor_y):
  """Loop whose loop variables are intertwined."""
  c = lambda i, j, x, y: j < 4
  b = lambda i, j, x, y: (j + 1, i + 1, functor_y(y), functor_x(x))
  init = (constant_op.constant(0), constant_op.constant(0), x0, y0)
  ijzw = control_flow_ops.while_loop(c, b, init)
  return ijzw
Developer: adit-chandra | Project: tensorflow | Lines: 7 | Source: auto_mixed_precision_test.py


Example 15: testIndexedSlicesGradientInCondInWhileLoop

  def testIndexedSlicesGradientInCondInWhileLoop(self):
    with ops.Graph().as_default():
      embedding_matrix = tf.get_variable(
          "embedding_matrix", [5, 5],
          initializer=tf.random_normal_initializer())

      def Cond(it, _):
        return it < 5
      def Body(it, cost):
        embedding = embedding_ops.embedding_lookup(embedding_matrix, [0])
        cost = tf.cond(tf.equal(it, 3),
                       lambda: tf.square(cost),
                       lambda: cost + tf.reduce_sum(embedding))
        return it + 1, cost
      _, cost = control_flow_ops.while_loop(
          Cond, Body, [tf.constant(0), tf.constant(0.0)])

      dynamic_grads = tf.gradients(cost, [embedding_matrix])[0]
      dynamic_grads = tf.segment_sum(dynamic_grads.values,
                                     dynamic_grads.indices)

      embedding = embedding_ops.embedding_lookup(embedding_matrix, [0])
      static = tf.square(
          tf.reduce_sum(embedding) +
          tf.reduce_sum(embedding) +
          tf.reduce_sum(embedding)) + tf.reduce_sum(embedding)
      static_grads = tf.gradients(static, [embedding_matrix])[0]
      static_grads = tf.segment_sum(static_grads.values, static_grads.indices)

      with self.test_session() as sess:
        sess.run(tf.global_variables_initializer())
        self.assertAllEqual(*sess.run([static_grads, dynamic_grads]))
Developer: ComeOnGetMe | Project: tensorflow | Lines: 32 | Source: control_flow_ops_test.py


Example 16: testDifferentShapesGraph

  def testDifferentShapesGraph(self):
    # Tests that a single kernel instance presented with multiple input shapes
    # does not crash with graph execution.
    with ops.device("gpu:0"):
      layer = cudnn_rnn.CudnnGRU(1, 100)
      layer(array_ops.zeros([28, 100, 100]))

      def _Cond(index, accumulation):
        del accumulation  # unused
        return math_ops.less(index, 4)

      def _Body(index, accumulation):
        layer_input = accumulation[:, :, 10 * (1 + index % 2):]
        output, _ = layer(layer_input)
        return index + 1, accumulation + output

      original_input = array_ops.zeros([28, 100, 100])
      _, accumulation = control_flow_ops.while_loop(_Cond, _Body,
                                                    [0, original_input])
      grad, = gradients.gradients(
          math_ops.reduce_sum(accumulation), (original_input,))
    init_op = variables.global_variables_initializer()
    with self.test_session() as sess:
      sess.run(init_op)
      accumulation_eval, grad_eval = sess.run((accumulation, grad))
      self.assertAllEqual([28, 100, 100], accumulation_eval.shape)
      self.assertAllEqual([28, 100, 100], grad_eval.shape)
Developer: AndrewTwinz | Project: tensorflow | Lines: 27 | Source: cudnn_rnn_test.py


Example 17: _simple_loop

def _simple_loop(x, functor):
  """Simple loop whose body is provided by the functor."""
  init = (constant_op.constant(0), x)
  c = lambda i, j: i < 4
  b = lambda i, j: (i + 1, functor(j))
  ij = control_flow_ops.while_loop(c, b, init)
  return ij
Developer: adit-chandra | Project: tensorflow | Lines: 7 | Source: auto_mixed_precision_test.py
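A hypothetical usage sketch for _simple_loop (the doubling functor and the starting value are assumptions for illustration): the condition i < 4 admits four iterations, so the second returned value is x multiplied by 2**4.

# Hypothetical usage of _simple_loop (TF 1.x graph mode; values assumed).
x = constant_op.constant(1.0)
i_final, x_final = _simple_loop(x, lambda v: v * 2.0)
# After evaluation: i_final == 4, x_final == 16.0.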


Example 18: _v1_nested_while_saved_model

  def _v1_nested_while_saved_model(self):
    export_graph = ops.Graph()
    with export_graph.as_default():

      def _inner_while(loop_iterations):
        _, output = control_flow_ops.while_loop(
            lambda index, accum: index <= loop_iterations,
            lambda index, accum: (index + 1, accum + index),
            [constant_op.constant(0), constant_op.constant(0)])
        return output

      loop_iterations = array_ops.placeholder(
          name="loop_iterations", shape=[], dtype=dtypes.int32)
      _, output = control_flow_ops.while_loop(
          lambda index, accum: index <= loop_iterations,
          lambda index, accum: (index + 1, accum + _inner_while(index)),
          [constant_op.constant(0), constant_op.constant(0)])
      with session_lib.Session() as session:
        path = os.path.join(self.get_temp_dir(), "saved_model", str(ops.uid()))
        simple_save.simple_save(
            session,
            path,
            inputs={"loop_iterations": loop_iterations},
            outputs={"output": output})
    return path
Developer: adit-chandra | Project: tensorflow | Lines: 25 | Source: load_v1_in_v2_test.py


Example 19: run_while

def run_while(cond_fn, body_fn, init_args):
  """Type-dependent functional while loop.

  Args:
    cond_fn: A Python callable implementing the stop conditions of the loop.
    body_fn: A Python callable implementing the body of the loop.
    init_args: The initial values of the arguments that will be passed to both
      cond_fn and body_fn.

  Returns:
    result: A list of values with the same shape and type as init_args. If any
    of the init_args, or any variables closed-over in cond_fn are Tensors,
    tf.while_loop will be used, otherwise a Python while loop will be run.

  Raises:
    ValueError: if init_args is not a tuple or list with one or more elements.
  """
  if not isinstance(init_args, (tuple, list)) or not init_args:
    raise ValueError(
        'init_args must be a non-empty list or tuple, found %s' % init_args)

  # TODO(alexbw): statically determine all active variables in cond_fn,
  # and pass them directly
  closure_vars = tuple(
      [c.cell_contents for c in six.get_function_closure(cond_fn) or []])
  possibly_tensors = tuple(init_args) + closure_vars
  if is_tensor(*possibly_tensors):
    return control_flow_ops.while_loop(cond_fn, body_fn, init_args)
  else:
    return py_while_loop(cond_fn, body_fn, init_args)
Developer: AndrewTwinz | Project: tensorflow | Lines: 30 | Source: multiple_dispatch.py
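A hypothetical usage sketch for run_while (the arguments are assumptions for illustration): with plain Python ints the Python fallback path is taken, while Tensor init_args, or Tensors closed over in cond_fn, route to control_flow_ops.while_loop.

# Hypothetical usage of run_while with non-Tensor arguments (Python loop path).
final_i, final_total = run_while(
    lambda i, total: i < 3,               # stop condition
    lambda i, total: (i + 1, total + i),  # loop body
    (0, 0))
# Expected final values: final_i == 3, final_total == 0 + 1 + 2 == 3.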


Example 20: testScanInsideWhile

  def testScanInsideWhile(self):

    def loop_cond(idx_step, *unused_args):
      return idx_step < 1

    def loop_body(idx_step, y):
      x = array_ops.zeros([10, 20, 30], dtype=dtypes.float32)
      x = functional_ops.scan(
          math_ops.add,
          x,
          initializer=array_ops.zeros([20, 30], dtype=dtypes.float32),
          back_prop=False,
          parallel_iterations=1)

      with ops.device('/cpu:0'):
        y = array_ops.identity(x)

        return idx_step + 1, y

    if test.is_gpu_available(cuda_only=True):
      init_y = array_ops.zeros([10, 20, 30], dtype=dtypes.float32)
      _, y = control_flow_ops.while_loop(
          loop_cond,
          loop_body,
          loop_vars=[0, init_y],
          back_prop=False,
          parallel_iterations=1)
      with session.Session() as sess:
        y_v = self.evaluate(y)
        self.assertAllEqual(np.zeros([10, 20, 30]), y_v)
Developer: JonathanRaiman | Project: tensorflow | Lines: 30 | Source: constant_folding_test.py



Note: The tensorflow.python.ops.control_flow_ops.while_loop examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective developers; copyright remains with the original authors, and any distribution or use should follow the corresponding projects' licenses. Please do not republish without permission.

