
Python tensorflow.while_loop Function Code Examples


This article collects typical usage examples of the Python function tensorflow.while_loop. If you are wondering what while_loop does, how to call it, or what real uses of it look like, the hand-picked code examples below may help.



Below are 20 code examples of the while_loop function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
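
Before diving into the examples, here is a minimal illustrative sketch of the basic tf.while_loop pattern they all share (TF 1.x style, matching the code below); it is not taken from any of the projects cited here. tf.while_loop takes a cond callable that decides whether to keep iterating, a body callable that returns the updated loop variables, and a list of initial loop variables:

import tensorflow as tf

# Sum the integers 0..9 with tf.while_loop.
i = tf.constant(0)
total = tf.constant(0)

cond = lambda i, total: tf.less(i, 10)      # keep looping while i < 10
body = lambda i, total: (i + 1, total + i)  # return the updated loop variables

final_i, final_total = tf.while_loop(cond, body, [i, total])

with tf.Session() as sess:
    print(sess.run(final_total))  # 45

By default, body must return tensors with the same structure and static shapes as the loop variables; several of the examples below relax the shape requirement via the shape_invariants argument.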

Example 1: testGradSerialTwoLoops

  def testGradSerialTwoLoops(self):
    with self.test_session():
      num_steps = 100
      acc = tf.TensorArray(dtype=tf.float32, size=num_steps,
                           clear_after_read=False,
                           element_shape=tensor_shape.scalar())
      i = tf.constant(0, name="i")
      x = tf.constant(2.0, name="x")

      c = lambda i, acc: i < 5
      def b(i, acc):
        x1 = tf.cond(tf.equal(i, 0),
                     lambda: x,
                     lambda: tf.mul(acc.read(i - 1), 2.0))
        return i + 1, acc.write(i, x1)
      i1, acc1 = tf.while_loop(c, b, [i, acc])

      z = tf.constant(0.0)
      def fn(i, acc):
        return i + 1, acc.write(i, z)
      _, acc2 = tf.while_loop(lambda i, acc: i < num_steps, fn, [i1, acc1])

      r = acc2.stack()
      grad = tf.gradients(r, [x])[0]
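      # The first loop writes x, 2x, 4x, 8x, 16x to the array; the second
      # overwrites the remaining entries with 0.0, so
      # d(sum(r))/dx = 1 + 2 + 4 + 8 + 16 = 31.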
      self.assertAllClose(31.0, grad.eval())
Author: BloodD, Project: tensorflow, Lines: 25, Source: tensor_array_ops_test.py


Example 2: _testStackWhileSwap

  def _testStackWhileSwap(self, use_gpu):
    with self.test_session(use_gpu=use_gpu):
      n = tf.constant(0)
      h = gen_data_flow_ops._stack(tf.float32, stack_name="foo")

      def c(x):
        return tf.less(x, 10)
      def b(x):
        with tf.control_dependencies([x]):
          a = tf.constant(np.ones(2000), dtype=tf.float32)
          v = gen_data_flow_ops._stack_push(h, a, swap_memory=True)
        with tf.control_dependencies([v]):
          return tf.add(x, 1)
      r = tf.while_loop(c, b, [n])

      v = tf.constant(np.zeros(2000), dtype=tf.float32)
      def c1(x, y):
        return tf.greater(x, 0)
      def b1(x, y):
        nx = tf.sub(x, 1)
        ny = y + gen_data_flow_ops._stack_pop(h, tf.float32)
        return [nx, ny]
      rx, ry = tf.while_loop(c1, b1, [r, v],
                             [r.get_shape(), tensor_shape.unknown_shape()])
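      # The first loop pushes ten ones(2000) tensors (swapped to host memory
      # when swap_memory=True); the second loop pops and accumulates them,
      # so ry == 10 * np.ones(2000).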
      self.assertAllClose(np.ones(2000) * 10.0, ry.eval())
Author: 821760408-sp, Project: tensorflow, Lines: 25, Source: stack_ops_test.py


Example 3: body

 def body(x):
     def nest_body(c):
         return tf.multiply(c, 2)
     def cd(c): return tf.less(c, 10)
     c = tf.constant(2)
     res = tf.while_loop(cd, nest_body, loop_vars=[c])
     return tf.nn.relu(x + res)
Author: bddppq, Project: tvm, Lines: 7, Source: test_control_flow.py


Example 4: batch_embed_lookup

def batch_embed_lookup(embedding, ids):
    '''
        embedding: shape(b_sz, tstp, emb_sz)
        ids : shape(b_sz, k)
    '''
    input_shape = tf.shape(embedding)
    time_steps = input_shape[0]
    def _create_ta(name, dtype):
        return tf.TensorArray(dtype=dtype,
                              size=time_steps,
                              tensor_array_name=name)
    input_ta = _create_ta('input_ta', embedding.dtype)
    fetch_ta = _create_ta('fetch_ta', ids.dtype)
    output_ta = _create_ta('output_ta', embedding.dtype)
    input_ta = input_ta.unstack(embedding)
    fetch_ta = fetch_ta.unstack(ids)

    def loop_body(time, output_ta):
        embed = input_ta.read(time) #shape(tstp, emb_sz) type of float32
        fetch_id = fetch_ta.read(time) #shape(tstp) type of int32
        out_emb = tf.nn.embedding_lookup(embed, fetch_id)
        output_ta = output_ta.write(time, out_emb)

        next_time = time+1
        return next_time, output_ta
    time = tf.constant(0)
    _, output_ta = tf.while_loop(cond=lambda time, *_: time < time_steps,
                  body=loop_body, loop_vars=(time, output_ta),
                  swap_memory=True)
    ret_t = output_ta.stack() #shape(b_sz, tstp, embd_sz)
    return ret_t
Author: et0803, Project: nlpcc2017_news_headline_categorization, Lines: 31, Source: TfUtils.py


Example 5: loss

  def loss(self, predicts, labels, objects_num):
    """Add Loss to all the trainable variables

    Args:
      predicts: 4-D tensor [batch_size, cell_size, cell_size, 5 * boxes_per_cell]
      ===> (num_classes, boxes_per_cell, 4 * boxes_per_cell)
      labels  : 3-D tensor of [batch_size, max_objects, 5]
      objects_num: 1-D tensor [batch_size]
    """
    class_loss = tf.constant(0, tf.float32)
    object_loss = tf.constant(0, tf.float32)
    noobject_loss = tf.constant(0, tf.float32)
    coord_loss = tf.constant(0, tf.float32)
    loss = [0, 0, 0, 0]
    for i in range(self.batch_size):
      predict = predicts[i, :, :, :]
      label = labels[i, :, :]
      object_num = objects_num[i]
      nilboy = tf.ones([7,7,2])
      tuple_results = tf.while_loop(self.cond1, self.body1, [tf.constant(0), object_num, [class_loss, object_loss, noobject_loss, coord_loss], predict, label, nilboy])
      for j in range(4):
        loss[j] = loss[j] + tuple_results[2][j]
      nilboy = tuple_results[5]

    tf.add_to_collection('losses', (loss[0] + loss[1] + loss[2] + loss[3])/self.batch_size)

    tf.summary.scalar('class_loss', loss[0]/self.batch_size)
    tf.summary.scalar('object_loss', loss[1]/self.batch_size)
    tf.summary.scalar('noobject_loss', loss[2]/self.batch_size)
    tf.summary.scalar('coord_loss', loss[3]/self.batch_size)
    tf.summary.scalar('weight_loss', tf.add_n(tf.get_collection('losses')) - (loss[0] + loss[1] + loss[2] + loss[3])/self.batch_size )

    return tf.add_n(tf.get_collection('losses'), name='total_loss'), nilboy
Author: TrendonixNetwork, Project: ProjectCybonix, Lines: 33, Source: yolo_tiny_net.py


Example 6: prior_model

    def prior_model(prior_queue_init, length=SIG_LEN):
        def cond(loop_counter, *_):
            return tf.less(loop_counter, length)

        def body(loop_counter, accumulated_output_array, accumulated_logits_array, next_input, *queue_contents):
            next_logit, queue_updates = sub_predictor(next_input, queue_contents)
            gumbeled = next_logit[:, 0, :] - tf.log(-tf.log(tf.random_uniform((tf.shape(next_logit)[0], QUANT_LEVELS))))
            sample_disc = tf.arg_max(gumbeled, 1)
            sample_cont = dequantizer(sample_disc, QUANT_LOWER, QUANT_UPPER, QUANT_LEVELS)
            accumulated_output_array = accumulated_output_array.write(loop_counter, sample_cont)
            accumulated_logits_array = accumulated_logits_array.write(loop_counter, next_logit[:, 0, :])
            sample_cont = tf.expand_dims(sample_cont, 1)
            sample_cont = tf.expand_dims(sample_cont, 1) # sic
            next_input = tf.concat(2, (sample_cont, tf.ones_like(sample_cont)))
            return [loop_counter+1, accumulated_output_array, accumulated_logits_array, next_input] + queue_updates

        accumulated_output_array = tf.TensorArray(tf.float32, size=SIG_LEN, clear_after_read=False)
        accumulated_logits_array = tf.TensorArray(tf.float32, size=SIG_LEN, clear_after_read=False)

        loop_var_init = [tf.constant(0, dtype=tf.int32), accumulated_output_array, accumulated_logits_array, tf.zeros((PRIOR_BATCH_SIZE, 1, 2))] + prior_queue_init
        accumulated_output_array, accumulated_logits_array = tf.while_loop(cond, body, loop_var_init, back_prop=False)[1:3]
        output = tf.transpose(accumulated_output_array.pack(), [1, 0])
        logits = tf.transpose(accumulated_logits_array.pack(), [1, 0, 2])

        output.set_shape((PRIOR_BATCH_SIZE, length))
        logits.set_shape((PRIOR_BATCH_SIZE, length, QUANT_LEVELS))
        return output, logits
Author: NoahDStein, Project: NeuralNetSandbox, Lines: 27, Source: wavenet.py


Example 7: batch_logits

def batch_logits(indices, acts):
  init_outs = tf.zeros([1, FLAGS.wvs, 1])

  def logits_continue(*parms):
    cur, idxs, _, _, _ = parms
    return tf.less(cur, tf.size(idxs), name='batch_done')

  def logits_batch_body(*parms):
    i, idxs, ptr, css, act = parms
    i_s = tf.reshape(tf.slice(idxs, tf.pack([i]), [1]), [])
    start, size = get_bounds(i_s)
    outs = forward_prop_nodes(start, size, acts, ptr)
    new_css = tf.cond(tf.equal(i, iZERO),
                      lambda: outs,
                      lambda: tf.concat(0, [css, outs]))
    return i + iONE, indices, ptr + size, new_css, acts
  with tf.device('/cpu:0'):
    iZ =  tf.convert_to_tensor(0, dtype=tf.int32)
  zero_activations(acts)
  while_parms = [iZ, indices, iZ, init_outs, acts]
  _, _, _, outs, _ = tf.while_loop(logits_continue, logits_batch_body, while_parms,
                                   parallel_iterations=1, name='batch_logits')
  lumpy_logits = tf.map_fn(activation_to_logits, outs, name='raw_logits')
  logits = tf.squeeze(lumpy_logits, [2], name='logits')
  return logits
Author: rgobbel, Project: rntn, Lines: 25, Source: tf_rntn.py


Example 8: _build_eval_specific_graph

  def _build_eval_specific_graph(self, iterator, model_fn, params,
                                 record_files_placeholder, num_eval_steps):
    """Builds the part of the model that is specific to evaluation."""

    def build():
      features = iterator.get_next()
      estimator_spec = model_fn(
          features, None, tf.estimator.ModeKeys.EVAL, params)
      run_model_op = tf.group(*(update_op for _, update_op in
                                estimator_spec.eval_metric_ops.values()))
      eval_metric_tensors = {k: tensor for (k, (tensor, _))
                             in estimator_spec.eval_metric_ops.items()}
      return run_model_op, estimator_spec.loss, eval_metric_tensors

    if self._use_while_loop:
      def body(i):
        run_model_op_single_step, _, _ = build()
        with tf.control_dependencies([run_model_op_single_step]):
          return i + 1

      run_model_op = tf.while_loop(lambda i: i < num_eval_steps, body, [0],
                                   parallel_iterations=1)
      loss = None
      eval_metric_tensors = {
          "HR": self._compute_metric_mean(rconst.HR_METRIC_NAME),
          "NDCG": self._compute_metric_mean(rconst.NDCG_METRIC_NAME),
      }
    else:
      run_model_op, loss, eval_metric_tensors = build()

    metric_initializer = tf.variables_initializer(
        tf.get_collection(tf.GraphKeys.METRIC_VARIABLES))
    return self._EvalModelProperties(
        record_files_placeholder, iterator, loss, params["eval_batch_size"],
        run_model_op, eval_metric_tensors, metric_initializer)
Author: 812864539, Project: models, Lines: 35, Source: model_runner.py


Example 9: _build_train_specific_graph

  def _build_train_specific_graph(self, iterator, model_fn, params,
                                  record_files_placeholder, num_train_steps):
    """Builds the part of the model that is specific to training."""

    def build():
      features, labels = iterator.get_next()
      estimator_spec = model_fn(
          features, labels, tf.estimator.ModeKeys.TRAIN, params)
      with tf.control_dependencies([estimator_spec.train_op]):
        run_model_op = self._global_step.assign_add(1)
      return run_model_op, estimator_spec.loss

    if self._use_while_loop:
      def body(i):
        run_model_op_single_step, _ = build()
        with tf.control_dependencies([run_model_op_single_step]):
          return i + 1

      run_model_op = tf.while_loop(lambda i: i < num_train_steps, body, [0],
                                   parallel_iterations=1)
      loss = None
    else:
      run_model_op, loss = build()

    return self._TrainModelProperties(
        record_files_placeholder, iterator, loss, params["batch_size"],
        run_model_op)
Author: 812864539, Project: models, Lines: 27, Source: model_runner.py


Example 10: forward_prop_nodes

def forward_prop_nodes(i_start, size, acts, offset):
  # Note: In the corpus that we've seen, parse trees are always ordered such that
  # iteration forward through the list will be in bottom-up order.
  # Conversely, iteration in reverse is always top-down.
  # This enables a simple iterative algorithm. If this were not the case,
  # putting the nodes in order by a postorder traversal would fix it.
  def fwd_continue(*parms):
    (_, sz, cur, _) = parms
    return tf.less(cur, sz, name='cur_le_size')

  def forward_prop(*parms):
    (i0, sz, cur, act) = parms
    with tf.device('/gpu:0'):
      gact = act
      gcur = cur
      next_idx = i0 + gcur
    node_out = tf.reshape(forward_node(next_idx, act, offset), [1, FLAGS.wvs, 1], name='node_out')
    tf.scatter_add(gact, tf.pack([gcur]), node_out, name='act_update')
    act = gact
    return [i0, sz, cur + iONE, act]

  with tf.device('/cpu:0'):
    i_start = tf.convert_to_tensor(i_start, dtype=tf.int32, name='i_start')
    size = tf.convert_to_tensor(size, dtype=tf.int32, name='size')
    iZ = tf.convert_to_tensor(0, dtype=tf.int32, name='ZERO')

  while_parms = [i_start, size, iZ, acts]
  wresult = tf.while_loop(fwd_continue, forward_prop, while_parms, parallel_iterations=1,
                          name='forward_prop_while')
  (_, _, _, result) = wresult
  return tf.slice(result, [0, 0, 0], tf.pack([size, -1, -1]), name='fwd_prop_nodes')
Author: rgobbel, Project: rntn, Lines: 31, Source: tf_rntn.py


Example 11: decoding_loop

    def decoding_loop(self, train_mode: bool, sample: bool = False,
                      temperature: float = 1) -> LoopState:
        """Run the decoding while loop.

        Calls get_initial_loop_state and constructs tf.while_loop
        with the continuation criterion returned from loop_continue_criterion,
        and body function returned from get_body.

        After finishing the tf.while_loop, it calls finalize_loop
        to further postprocess the final decoder loop state (usually
        by stacking Tensors containing decoding histories).

        Arguments:
            train_mode: Boolean flag, telling whether this is
                a training run.
            sample: Boolean flag, telling whether we should sample
                the output symbols from the output distribution instead
                of using argmax or gold data.
            temperature: float value specifying the softmax temperature
        """
        initial_loop_state = self.get_initial_loop_state()
        with tf.control_dependencies([self.decoding_w, self.decoding_b]):
            final_loop_state = tf.while_loop(
                self.loop_continue_criterion,
                self.get_body(train_mode, sample, temperature),
                initial_loop_state,
                shape_invariants=tf.contrib.framework.nest.map_structure(
                    get_state_shape_invariants, initial_loop_state))
        self.finalize_loop(final_loop_state, train_mode)

        return final_loop_state
Author: ufal, Project: neuralmonkey, Lines: 31, Source: autoregressive.py


Example 12: _sample_n

  def _sample_n(self, n, seed=None):
    """Sample `n` draws from the DP. Draws from the base
    distribution are memoized across `n` and across calls to
    `sample()`.

    Draws from the base distribution are not memoized across the batch
    shape, i.e., each independent DP in the batch shape has its own
    memoized samples.

    Returns:
      tf.Tensor.
      A `tf.Tensor` of shape `[n] + batch_shape + event_shape`,
      where `n` is the number of samples for each DP,
      `batch_shape` is the number of independent DPs, and
      `event_shape` is the shape of the base distribution.

    #### Notes

    The implementation has one inefficiency, which is that it draws
    (batch_shape,) samples from the base distribution when adding a
    new persistent state. Ideally, we would only draw new samples for
    those in the loop which require it.
    """
    if seed is not None:
      raise NotImplementedError("seed is not implemented.")

    batch_shape = self.batch_shape.as_list()
    event_shape = self.event_shape.as_list()
    rank = 1 + len(batch_shape) + len(event_shape)
    # Note this is for scoping within the while loop's body function.
    self._temp_scope = [n, batch_shape, event_shape, rank]

    # Start at the beginning of the stick, i.e. the k'th index
    k = tf.constant(0)

    # Define boolean tensor. It is True for samples that require continuing
    # the while loop and False for samples that can receive their base
    # distribution (coin lands heads). Also note that we need one bool for
    # each sample
    bools = tf.ones([n] + batch_shape, dtype=tf.bool)

    # Initialize all samples as zero, they will be overwritten in any case
    draws = tf.zeros([n] + batch_shape + event_shape, dtype=self.base.dtype)

    # Calculate shape invariance conditions for locs and probs as these
    # can change shape between loop iterations.
    locs_shape = tf.TensorShape([None])
    probs_shape = tf.TensorShape([None])
    if len(self.locs.shape) > 1:
      locs_shape = locs_shape.concatenate(self.locs.shape[1:])
      probs_shape = probs_shape.concatenate(self.probs.shape[1:])

    # While we have not broken enough sticks, keep sampling.
    _, _, self._locs, self._probs, samples = tf.while_loop(
        self._sample_n_cond, self._sample_n_body,
        loop_vars=[k, bools, self.locs, self.probs, draws],
        shape_invariants=[
            k.shape, bools.shape, locs_shape, probs_shape, draws.shape])

    return samples
Author: JoyceYa, Project: edward, Lines: 60, Source: dirichlet_process.py


Example 13: decoding_loop

    def decoding_loop(self) -> BeamSearchOutput:
        """Create the decoding loop.

        This function mimics the behavior of the ``decoding_loop`` method of
        the ``AutoregressiveDecoder``, except the initial loop state is created
        outside this method because it is accessed and fed during ensembling.

        TODO: The ``finalize_loop`` method and the handling of attention loop
        states might be implemented in the future.

        Returns:
            This method returns a populated ``BeamSearchOutput`` object.
        """

        final_loop_state = tf.while_loop(
            self.loop_continue_criterion,
            self.get_body(),
            self.initial_loop_state,
            shape_invariants=tf.contrib.framework.nest.map_structure(
                get_state_shape_invariants, self.initial_loop_state))

        # TODO: return att_loop_states properly
        return BeamSearchOutput(
            last_search_step_output=final_loop_state.search_results,
            last_dec_loop_state=final_loop_state.decoder_loop_state,
            last_search_state=final_loop_state.search_state,
            attention_loop_states=[])
Author: ufal, Project: neuralmonkey, Lines: 27, Source: beam_search_decoder.py


Example 14: geometric

def geometric(p):
  i = tf.constant(0)
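  # Each iteration draws a fresh Bernoulli(p); the loop counts failures
  # until the first success, so the returned counter is a Geometric(p) draw.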
  sample = tf.while_loop(
      cond=lambda i: tf.cast(1 - Bernoulli(probs=p), tf.bool),
      body=lambda i: i + 1,
      loop_vars=[i])
  return sample
Author: JoyceYa, Project: edward, Lines: 7, Source: pp_stochastic_control_flow.py


Example 15: hinge_loss

 def hinge_loss(self, y_true, y_pred):
     # Custom loss function
     margin = 0.1
     # loop counter
     i = tf.constant(0)
     # loop condition function
     c = lambda i, _: tf.less(i, tf.shape(y_true)[0])
     outer_sum_loss = tf.constant(0.0)
     def process_ele(i, outer_sum_loss):
         # Get a subtensor from batch
         y_true_one = y_true[i]
         y_pred_one = y_pred[i]
         # Stack margin to a num_class*1 matrix
         margin_stack = tf.reshape(tf.stack([tf.constant(0.1)] * self.num_classes), [self.num_classes, 1])
         # Stack true label to a word_dim*num_class matrix and transpose it
         y_true_one_stack = tf.stack([tf.transpose(y_true_one)] * self.num_classes)
         # Reshape predict from (word_dim,) to (word_dim,1)
         y_pred_one_t = tf.reshape(y_pred_one, [self.word_dim, 1])
         # Calculate loss
         r = margin_stack - tf.matmul(y_true_one_stack, y_pred_one_t) + tf.matmul(self.label_vec_tensor, y_pred_one_t)
         # Summation
         # We did not exclude true label inside summation, so we subtract extra margin
         sum_inner_loss = tf.reduce_sum(K.relu(r)) - margin
         # Return counter++ and accumulated loss
         return tf.add(i, 1), tf.add(outer_sum_loss, sum_inner_loss)
     
     _, outer_sum_loss = tf.while_loop(c, process_ele, [i, outer_sum_loss])
     # Return average loss over batch
     return outer_sum_loss / tf.cast(tf.shape(y_true)[0], dtype=tf.float32)
Author: s60912frank, Project: ML_MS3, Lines: 29, Source: devise_model.py


Example 16: compute_states

    def compute_states(self,emb,idx_batch=0):


        num_leaves = tf.squeeze(tf.gather(self.num_leaves,idx_batch))
        #num_leaves=tf.Print(num_leaves,[num_leaves])
        n_inodes = tf.gather(self.n_inodes,idx_batch)
        #embx=tf.gather(emb,tf.range(num_leaves))
        embx=tf.gather(tf.gather(emb,idx_batch),tf.range(num_leaves))
        #treestr=self.treestr#tf.gather(self.treestr,tf.range(self.n_inodes))
        treestr=tf.gather(tf.gather(self.treestr,idx_batch),tf.range(n_inodes))
        leaf_hc = self.process_leafs(embx)
        leaf_h,leaf_c=tf.split(1,2,leaf_hc)


        node_h=tf.identity(leaf_h)
        node_c=tf.identity(leaf_c)

        idx_var=tf.constant(0) #tf.Variable(0,trainable=False)

        with tf.variable_scope("Composition",reuse=True):

            cW = tf.get_variable("cW",[self.degree*self.hidden_dim,(self.degree+3)*self.hidden_dim])
            cb = tf.get_variable("cb",[4*self.hidden_dim])
            bu,bo,bi,bf=tf.split(0,4,cb)

            def _recurrence(node_h,node_c,idx_var):
                node_info=tf.gather(treestr,idx_var)

                child_h=tf.gather(node_h,node_info)
                child_c=tf.gather(node_c,node_info)

                flat_ = tf.reshape(child_h,[-1])
                tmp=tf.matmul(tf.expand_dims(flat_,0),cW)
                u,o,i,fl,fr=tf.split(1,5,tmp)

                i=tf.nn.sigmoid(i+bi)
                o=tf.nn.sigmoid(o+bo)
                u=tf.nn.tanh(u+bu)
                fl=tf.nn.sigmoid(fl+bf)
                fr=tf.nn.sigmoid(fr+bf)

                f=tf.concat(0,[fl,fr])
                c = i * u + tf.reduce_sum(f*child_c,[0])
                h = o * tf.nn.tanh(c)

                node_h = tf.concat(0,[node_h,h])

                node_c = tf.concat(0,[node_c,c])

                idx_var=tf.add(idx_var,1)

                return node_h,node_c,idx_var
            loop_cond = lambda a1,b1,idx_var: tf.less(idx_var,n_inodes)

            loop_vars=[node_h,node_c,idx_var]
            node_h,node_c,idx_var=tf.while_loop(loop_cond, _recurrence,
                                                loop_vars,parallel_iterations=10)

            return node_h
Author: Chelz, Project: RecursiveNN, Lines: 59, Source: tf_tree_lstm.py


Example 17: fn1

        def fn1(a, b):
            i = tf.constant(0)

            def cd(i): return tf.less(i, 10)

            def bd(i): return tf.add(i, 1)
            res = tf.while_loop(cd, bd, [i])
            return tf.multiply(tf.add(20, res), 10)
Author: bddppq, Project: tvm, Lines: 8, Source: test_control_flow.py


Example 18: while_module_fn

def while_module_fn():
  """Compute x^n with while_loop."""
  x = tf.placeholder(dtype=tf.float32, name="x", shape=[])
  n = tf.placeholder(dtype=tf.int32, name="n")
  _, pow_x = tf.while_loop(
      lambda i, ix: i < n, lambda i, ix: [tf.add(i, 1), ix * x],
      [tf.constant(0), tf.constant(1.0)])
  hub.add_signature(inputs={"x": x, "n": n}, outputs=pow_x)
Author: jankim, Project: hub, Lines: 8, Source: native_module_test.py


Example 19: custom_dynamic_rnn

def custom_dynamic_rnn(cell, inputs, inputs_len, initial_state=None):
    """
    Implements a dynamic rnn that can store scores in the pointer network,
    the reason why we implements this is that the raw_rnn or dynamic_rnn function in Tensorflow
    seem to require the hidden unit and memory unit has the same dimension, and we cannot
    store the scores directly in the hidden unit.
    Args:
        cell: RNN cell
        inputs: the input sequence to rnn
        inputs_len: valid length
        initial_state: initial_state of the cell
    Returns:
        outputs and state
    """
    batch_size = tf.shape(inputs)[0]
    max_time = tf.shape(inputs)[1]

    inputs_ta = tf.TensorArray(dtype=tf.float32, size=max_time)
    inputs_ta = inputs_ta.unstack(tf.transpose(inputs, [1, 0, 2]))
    emit_ta = tf.TensorArray(dtype=tf.float32, dynamic_size=True, size=0)
    t0 = tf.constant(0, dtype=tf.int32)
    if initial_state is not None:
        s0 = initial_state
    else:
        s0 = cell.zero_state(batch_size, dtype=tf.float32)
    f0 = tf.zeros([batch_size], dtype=tf.bool)

    def loop_fn(t, prev_s, emit_ta, finished):
        """
        the loop function of rnn
        """
        cur_x = inputs_ta.read(t)
        scores, cur_state = cell(cur_x, prev_s)

        # copy through
        scores = tf.where(finished, tf.zeros_like(scores), scores)

        if isinstance(cell, tc.rnn.LSTMCell):
            cur_c, cur_h = cur_state
            prev_c, prev_h = prev_s
            cur_state = tc.rnn.LSTMStateTuple(tf.where(finished, prev_c, cur_c),
                                              tf.where(finished, prev_h, cur_h))
        else:
            cur_state = tf.where(finished, prev_s, cur_state)

        emit_ta = emit_ta.write(t, scores)
        finished = tf.greater_equal(t + 1, inputs_len)
        return [t + 1, cur_state, emit_ta, finished]

    _, state, emit_ta, _ = tf.while_loop(
        cond=lambda _1, _2, _3, finished: tf.logical_not(tf.reduce_all(finished)),
        body=loop_fn,
        loop_vars=(t0, s0, emit_ta, f0),
        parallel_iterations=32,
        swap_memory=False)

    outputs = tf.transpose(emit_ta.stack(), [1, 0, 2])
    return outputs, state
Author: jiangyuenju, Project: DuReader, Lines: 58, Source: pointer_net.py


Example 20: construct_tensor_array

 def construct_tensor_array(self):
     loop_condition = lambda tensor_array, i: \
                      tf.less(i, tf.squeeze(tf.shape(self.is_a_leaf)))
     #iterate over all leaves + composition
     tensor_array_op = tf.while_loop(cond=loop_condition,
                                     body=self._loop_over_tree,
                                     loop_vars=[self.tensor_array, 0],
                                     parallel_iterations=1)[0]
     return tensor_array_op
Author: kingtaurus, Project: cs224d, Lines: 9, Source: rnn_tensorarray.py



Note: the tensorflow.while_loop examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective authors; copyright remains with the original authors, and distribution and use are subject to each project's license. Do not repost without permission.

