Python tensorflow.reverse_sequence Function Code Examples


This article collects and summarizes typical usage examples of the tensorflow.reverse_sequence function in Python. If you have been wondering what reverse_sequence does, how to call it, or what real-world usage looks like, the curated code examples below should help.



The following presents 20 code examples of the reverse_sequence function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
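Before the project excerpts, here is a minimal, self-contained sketch of what tf.reverse_sequence does, written against the TF 1.x API that most of the examples below use (the tensor values are purely illustrative): it reverses the first seq_lengths[i] entries of each batch element along the sequence axis and leaves any padding beyond that length untouched.

import tensorflow as tf  # TF 1.x API, matching the examples below

# A toy batch of 2 sequences padded to length 4.
inputs = tf.constant([[1, 2, 3, 0],
                      [4, 5, 0, 0]])
seq_lengths = tf.constant([3, 2])

# Reverse only the valid prefix of each row along the time axis (axis 1);
# `seq_axis`/`batch_axis` were named `seq_dim`/`batch_dim` in older releases.
reversed_inputs = tf.reverse_sequence(inputs,
                                      seq_lengths=seq_lengths,
                                      seq_axis=1,
                                      batch_axis=0)

with tf.Session() as sess:
    print(sess.run(reversed_inputs))
    # [[3 2 1 0]
    #  [5 4 0 0]]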

Example 1: inference

    def inference(self, X, length, reuse=False):
        length_64 = tf.cast(length, tf.int64)
        with tf.variable_scope("bilstm", reuse=reuse):
            forward_output, _ = tf.nn.dynamic_rnn(
                tf.contrib.rnn.LSTMCell(self.num_hidden,
                                        reuse=reuse),
                X,
                dtype=tf.float32,
                sequence_length=length,
                scope="RNN_forward")
            backward_output_, _ = tf.nn.dynamic_rnn(
                tf.contrib.rnn.LSTMCell(self.num_hidden,
                                        reuse=reuse),
                inputs=tf.reverse_sequence(X,
                                           length_64,
                                           seq_dim=1),
                dtype=tf.float32,
                sequence_length=length,
                scope="RNN_backword")

        backward_output = tf.reverse_sequence(backward_output_,
                                              length_64,
                                              seq_dim=1)

        output = tf.concat([forward_output, backward_output], 2)
        output = tf.reshape(output, [-1, self.num_hidden * 2])
        if reuse is None or not reuse:
            output = tf.nn.dropout(output, 0.5)

        matricized_unary_scores = tf.matmul(output, self.W) + self.b
        unary_scores = tf.reshape(
            matricized_unary_scores,
            [-1, self.max_seq_len, self.num_tags],
            name="Reshape_7" if reuse else None)
        return unary_scores
Author: koth, Project: kcws, Lines: 35, Source: bilstm.py
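The excerpt above shows the pattern that recurs in most of the later examples: to run the backward half of a bidirectional RNN with the forward-only tf.nn.dynamic_rnn, reverse the valid portion of each input sequence, run the RNN, then reverse the outputs back so each time step lines up with the forward outputs. Below is a stripped-down sketch of just that pattern, not taken from any of the cited projects (the helper name bilstm_outputs and all shapes are illustrative, TF 1.x API):

import tensorflow as tf  # TF 1.x

def bilstm_outputs(inputs, lengths, num_hidden):
    """Illustrative only: forward pass plus the 'reverse, run, reverse back' backward pass."""
    fw_cell = tf.nn.rnn_cell.LSTMCell(num_hidden)
    bw_cell = tf.nn.rnn_cell.LSTMCell(num_hidden)

    fw_out, _ = tf.nn.dynamic_rnn(fw_cell, inputs, sequence_length=lengths,
                                  dtype=tf.float32, scope="fw_rnn")

    # Backward direction: reverse the valid prefix of each sequence...
    rev_inputs = tf.reverse_sequence(inputs, lengths, seq_axis=1, batch_axis=0)
    bw_out_rev, _ = tf.nn.dynamic_rnn(bw_cell, rev_inputs, sequence_length=lengths,
                                      dtype=tf.float32, scope="bw_rnn")
    # ...then reverse the outputs back so time steps align with fw_out.
    bw_out = tf.reverse_sequence(bw_out_rev, lengths, seq_axis=1, batch_axis=0)

    # [batch, time, 2 * num_hidden]
    return tf.concat([fw_out, bw_out], axis=2)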


Example 2: _composition_function

 def _composition_function(self, inputs, length, init_state=None):
     if self._composition == "GRU":
         cell = GRUCell(self._size)
         return dynamic_rnn(cell, inputs, sequence_length=length, time_major=True,
                            initial_state=init_state, dtype=tf.float32)[0]
     elif self._composition == "LSTM":
         cell = BasicLSTMCell(self._size)
         init_state = tf.concat(1, [tf.zeros_like(init_state, tf.float32), init_state]) if init_state else None
         outs = dynamic_rnn(cell, inputs, sequence_length=length, time_major=True,
                            initial_state=init_state, dtype=tf.float32)[0]
         return outs
     elif self._composition == "BiGRU":
         cell = GRUCell(self._size // 2, self._size)
         init_state_fw, init_state_bw = tf.split(1, 2, init_state) if init_state else (None, None)
         with tf.variable_scope("forward"):
             fw_outs = dynamic_rnn(cell, inputs, sequence_length=length, time_major=True,
                                   initial_state=init_state_fw, dtype=tf.float32)[0]
         with tf.variable_scope("backward"):
             rev_inputs = tf.reverse_sequence(tf.pack(inputs), length, 0, 1)
             rev_inputs = [tf.reshape(x, [-1, self._size]) for x in tf.split(0, len(inputs), rev_inputs)]
             bw_outs = dynamic_rnn(cell, rev_inputs, sequence_length=length, time_major=True,
                                   initial_state=init_state_bw, dtype=tf.float32)[0]
             bw_outs = tf.reverse_sequence(tf.pack(bw_outs), length, 0, 1)
             bw_outs = [tf.reshape(x, [-1, self._size]) for x in tf.split(0, len(inputs), bw_outs)]
         return [tf.concat(1, [fw_out, bw_out]) for fw_out, bw_out in zip(fw_outs, bw_outs)]
     else:
         raise NotImplementedError("Other compositions not implemented yet.")
Author: MorLong, Project: qa_network, Lines: 27, Source: qa_network.py


Example 3: set_observations

  def set_observations(self, observations, seq_lengths):
    """Stores the model's observations.

    Stores the observations (inputs and targets) in TensorArrays and precomputes
    things for later like the reverse RNN output and encoded targets.

    Args:
      observations: The observations of the model, a tuple containing two
        Tensors of shape [max_seq_len, batch_size, data_size]. The Tensors
        should be the inputs and targets, respectively.
      seq_lengths: An int Tensor of shape [batch_size] containing the length
        of each sequence in observations.
    """
    inputs, targets = observations
    self.seq_lengths = seq_lengths
    self.max_seq_len = tf.reduce_max(seq_lengths)
    self.inputs_ta = base.ta_for_tensor(inputs, clear_after_read=False)
    self.targets_ta = base.ta_for_tensor(targets, clear_after_read=False)
    targets_encoded = base.encode_all(targets, self.data_encoder)
    self.targets_encoded_ta = base.ta_for_tensor(targets_encoded,
                                                 clear_after_read=False)
    if self.rev_rnn_cell:
      reverse_targets_encoded = tf.reverse_sequence(
          targets_encoded, seq_lengths, seq_axis=0, batch_axis=1)
      # Compute the reverse rnn over the targets.
      reverse_rnn_out, _ = tf.nn.dynamic_rnn(self.rev_rnn_cell,
                                             reverse_targets_encoded,
                                             time_major=True,
                                             dtype=tf.float32)
      reverse_rnn_out = tf.reverse_sequence(reverse_rnn_out, seq_lengths,
                                            seq_axis=0, batch_axis=1)
      self.reverse_rnn_ta = base.ta_for_tensor(reverse_rnn_out,
                                               clear_after_read=False)
Author: 812864539, Project: models, Lines: 33, Source: vrnn.py


Example 4: __call__

    def __call__(self, inputs, seq_len, keep_prob=1.0, is_train=None, concat_layers=True):
        outputs = [tf.transpose(inputs, [1, 0, 2])]
        for layer in range(self.num_layers):
            gru_fw, gru_bw = self.grus[layer]
            init_fw, init_bw = self.inits[layer]
            mask_fw, mask_bw = self.dropout_mask[layer]
            with tf.variable_scope('fw_{}'.format(layer), reuse=tf.AUTO_REUSE):
                with tf.variable_scope('cudnn_gru', reuse=tf.AUTO_REUSE):
                    out_fw, _ = tf.nn.dynamic_rnn(cell=gru_fw, inputs=outputs[-1] * mask_fw, time_major=True,
                                                  initial_state=tuple(tf.unstack(init_fw, axis=0)))

            with tf.variable_scope('bw_{}'.format(layer), reuse=tf.AUTO_REUSE):
                with tf.variable_scope('cudnn_gru', reuse=tf.AUTO_REUSE):
                    inputs_bw = tf.reverse_sequence(
                        outputs[-1] * mask_bw, seq_lengths=seq_len, seq_dim=0, batch_dim=1)
                    out_bw, _ = tf.nn.dynamic_rnn(cell=gru_bw, inputs=inputs_bw, time_major=True,
                                                  initial_state=tuple(tf.unstack(init_bw, axis=0)))
                    out_bw = tf.reverse_sequence(
                        out_bw, seq_lengths=seq_len, seq_dim=0, batch_dim=1)

            outputs.append(tf.concat([out_fw, out_bw], axis=2))
        if concat_layers:
            res = tf.concat(outputs[1:], axis=2)
        else:
            res = outputs[-1]
        res = tf.transpose(res, [1, 0, 2])
        return res
Author: RileyShe, Project: DeepPavlov, Lines: 27, Source: utils.py


Example 5: build

    def build(self):
        print('Building model')
        self.x_embeddings = tf.Variable(
            tf.random_normal([self.alphabet_src_size, self.embedd_dims],
            stddev=0.1), name='x_embeddings')
        self.t_embeddings = tf.Variable(
            tf.random_normal([self.alphabet_tar_size, self.embedd_dims],
            stddev=0.1), name='t_embeddings')

        X_embedded = tf.gather(self.x_embeddings, self.Xs, name='embed_X')
        t_embedded = tf.gather(self.t_embeddings, self.ts_go, name='embed_t')

        with tf.variable_scope('dense_out'):
            W_out = tf.get_variable('W_out', [self.word_encoder_units*2, self.alphabet_tar_size])
            b_out = tf.get_variable('b_out', [self.alphabet_tar_size])

        # forward encoding
        char_enc_state, char_enc_out = encoder(X_embedded, self.X_len, 'char_encoder', self.char_encoder_units)
        char2word = _grid_gather(char_enc_out, self.X_spaces)
        char2word.set_shape([None, None, self.char_encoder_units])
        word_enc_state, word_enc_out = encoder(char2word, self.X_spaces_len, 'word_encoder', self.word_encoder_units)

        # backward encoding words
        char2word = tf.reverse_sequence(char2word, tf.to_int64(self.X_spaces_len), 1)
        char2word.set_shape([None, None, self.char_encoder_units])
        word_enc_state_bck, word_enc_out_bck = encoder(char2word, self.X_spaces_len, 'word_encoder_backwards', self.word_encoder_units)
        word_enc_out_bck = tf.reverse_sequence(word_enc_out_bck, tf.to_int64(self.X_spaces_len), 1)

        word_enc_state = tf.concat(1, [word_enc_state, word_enc_state_bck])
        word_enc_out = tf.concat(2, [word_enc_out, word_enc_out_bck])

        # decoding
        dec_state, dec_out, valid_dec_out, valid_attention_tracker = (
            attention_decoder(word_enc_out, self.X_spaces_len, word_enc_state,
                              t_embedded, self.t_len, self.attn_units,
                              self.t_embeddings, W_out, b_out))

        out_tensor = tf.reshape(dec_out, [-1, self.word_encoder_units*2])
        out_tensor = tf.matmul(out_tensor, W_out) + b_out
        out_shape = tf.concat(0, [tf.expand_dims(tf.shape(self.X_len)[0], 0),
                                  tf.expand_dims(tf.shape(t_embedded)[1], 0),
                                  tf.expand_dims(tf.constant(self.alphabet_tar_size), 0)])
        self.valid_attention_tracker = valid_attention_tracker.pack()
        self.out_tensor = tf.reshape(out_tensor, out_shape)
        self.out_tensor.set_shape([None, None, self.alphabet_tar_size])

        valid_out_tensor = tf.reshape(valid_dec_out, [-1, self.word_encoder_units*2])
        valid_out_tensor = tf.matmul(valid_out_tensor, W_out) + b_out
        self.valid_out_tensor = tf.reshape(valid_out_tensor, out_shape)

        self.out = None

        # add TensorBoard summaries for all variables
        tf.contrib.layers.summarize_variables()
Author: Styrke, Project: master-code, Lines: 54, Source: default.py


Example 6: inference

    def inference(self, wX, cX, reuse=None, trainMode=True):
        word_vectors = tf.nn.embedding_lookup(self.words, wX)
        char_vectors = tf.nn.embedding_lookup(self.chars, cX)
        char_vectors = tf.reshape(char_vectors, [-1, FLAGS.max_sentence_len,
                                                 FLAGS.max_chars_per_word,
                                                 FLAGS.embedding_char_size])
        char_vectors = tf.transpose(char_vectors, perm=[1, 0, 2, 3])
        char_vectors = tf.expand_dims(char_vectors, -1)
        length = self.length(wX)
        length_64 = tf.cast(length, tf.int64)

        # do conv
        def do_char_conv(x): return self.char_convolution(x)
        char_vectors_x = tf.map_fn(do_char_conv, char_vectors)
        char_vectors_x = tf.transpose(char_vectors_x, perm=[1, 0, 2])
        word_vectors = tf.concat([word_vectors, char_vectors_x], axis=2)
        # if trainMode:
        #  word_vectors = tf.nn.dropout(word_vectors, 0.5)
        reuse = None if trainMode else True
        with tf.variable_scope("rnn_fwbw", reuse=reuse) as scope:
            forward_output, _ = tf.nn.dynamic_rnn(
                tf.contrib.rnn.LSTMCell(self.numHidden,
                                        reuse=reuse),
                word_vectors,
                dtype=tf.float32,
                sequence_length=length,
                scope="RNN_forward")
            backward_output_, _ = tf.nn.dynamic_rnn(
                tf.contrib.rnn.LSTMCell(self.numHidden,
                                        reuse=reuse),
                inputs=tf.reverse_sequence(word_vectors,
                                           length_64,
                                           seq_dim=1),
                dtype=tf.float32,
                sequence_length=length,
                scope="RNN_backword")

        backward_output = tf.reverse_sequence(backward_output_,
                                              length_64,
                                              seq_dim=1)

        output = tf.concat([forward_output, backward_output], 2)
        output = tf.reshape(output, [-1, self.numHidden * 2])
        if trainMode:
            output = tf.nn.dropout(output, 0.5)

        matricized_unary_scores = tf.matmul(output, self.W) + self.b
        # matricized_unary_scores = tf.nn.log_softmax(matricized_unary_scores)
        unary_scores = tf.reshape(
            matricized_unary_scores,
            [-1, FLAGS.max_sentence_len, self.distinctTagNum])

        return unary_scores, length
Author: koth, Project: kcws, Lines: 53, Source: train_pos.py


Example 7: cudnn_bi_gru

def cudnn_bi_gru(units,
                 n_hidden,
                 seq_lengths=None,
                 n_layers=1,
                 trainable_initial_states=False,
                 name='cudnn_bi_gru',
                 reuse=False):
    """ Fast CuDNN Bi-GRU implementation

    Args:
        units: tf.Tensor with dimensions [B x T x F], where
            B - batch size
            T - number of tokens
            F - features
        n_hidden: dimensionality of hidden state
        seq_lengths: number of tokens in each sample in the batch
        n_layers: number of layers
        trainable_initial_states: whether to create a special trainable variable
                to initialize the hidden states of the network or use just zeros
        name: name of the variable scope to use
        reuse:whether to reuse already initialized variable


    Returns:
        h - all hidden states along T dimension,
            tf.Tensor with dimensionality [B x T x F]
        h_last - last hidden state, tf.Tensor with dimensionality [B x H * 2]
            where H - number of hidden units
    """

    with tf.variable_scope(name, reuse=reuse):
        if seq_lengths is None:
            seq_lengths = tf.ones([tf.shape(units)[0]], dtype=tf.int32) * tf.shape(units)[1]
        with tf.variable_scope('Forward'):
            h_fw, h_last_fw = cudnn_gru_wrapper(units,
                                                n_hidden,
                                                n_layers=n_layers,
                                                trainable_initial_states=trainable_initial_states,
                                                seq_lengths=seq_lengths,
                                                reuse=reuse)

        with tf.variable_scope('Backward'):
            reversed_units = tf.reverse_sequence(units, seq_lengths=seq_lengths, seq_dim=1, batch_dim=0)
            h_bw, h_last_bw = cudnn_gru_wrapper(reversed_units,
                                                n_hidden,
                                                n_layers=n_layers,
                                                trainable_initial_states=trainable_initial_states,
                                                seq_lengths=seq_lengths,
                                                reuse=reuse)
            h_bw = tf.reverse_sequence(h_bw, seq_lengths=seq_lengths, seq_dim=1, batch_dim=0)

    return (h_fw, h_bw), (h_last_fw, h_last_bw)
Author: RileyShe, Project: DeepPavlov, Lines: 52, Source: tf_layers.py


Example 8: testShapeFunctionEdgeCases

  def testShapeFunctionEdgeCases(self):
    # Batch size mismatched between input and seq_lengths.
    with self.assertRaises(ValueError):
      tf.reverse_sequence(
          tf.placeholder(tf.float32, shape=(32, 2, 3)),
          seq_lengths=tf.placeholder(tf.int64, shape=(33,)),
          seq_dim=3)

    # seq_dim out of bounds.
    with self.assertRaisesRegexp(ValueError, "seq_dim must be < input.dims()"):
      tf.reverse_sequence(
          tf.placeholder(tf.float32, shape=(32, 2, 3)),
          seq_lengths=tf.placeholder(tf.int64, shape=(32,)),
          seq_dim=3)
Author: nickicindy, Project: tensorflow, Lines: 14, Source: reverse_sequence_op_test.py


Example 9: lstm_seq2seq_internal

def lstm_seq2seq_internal(inputs, targets, hparams, train):
  """The basic LSTM seq2seq model, main step used for training."""
  with tf.variable_scope("lstm_seq2seq"):
    if inputs is not None:
      inputs_length = common_layers.length_from_embedding(inputs)
      # Flatten inputs.
      inputs = common_layers.flatten4d3d(inputs)

      # LSTM encoder.
      inputs = tf.reverse_sequence(inputs, inputs_length, seq_axis=1)
      _, final_encoder_state = lstm(inputs, inputs_length, hparams, train,
                                    "encoder")
    else:
      final_encoder_state = None

    # LSTM decoder.
    shifted_targets = common_layers.shift_right(targets)
    # Add 1 to account for the padding added to the left from shift_right
    targets_length = common_layers.length_from_embedding(shifted_targets) + 1
    decoder_outputs, _ = lstm(
        common_layers.flatten4d3d(shifted_targets),
        targets_length,
        hparams,
        train,
        "decoder",
        initial_state=final_encoder_state)
    return tf.expand_dims(decoder_outputs, axis=2)
Author: qixiuai, Project: tensor2tensor, Lines: 27, Source: lstm.py


Example 10: testFloatReverseSequenceGrad

  def testFloatReverseSequenceGrad(self):
    x = np.asarray([
        [[1, 2, 3, 4], [5, 6, 7, 8]],
        [[9, 10, 11, 12], [13, 14, 15, 16]],
        [[17, 18, 19, 20], [21, 22, 23, 24]]], dtype=np.float)
    x = x.reshape(3, 2, 4, 1, 1)
    x = x.transpose([2, 1, 0, 3, 4])  # transpose axes 0 <=> 2

    # reverse dim 0 up to (0:3, none, 0:4) along dim=2
    seq_dim = 0
    batch_dim = 2
    seq_lengths = np.asarray([3, 0, 4], dtype=np.int64)

    with self.test_session():
      input_t = tf.constant(x, shape=x.shape)
      seq_lengths_t = tf.constant(seq_lengths, shape=seq_lengths.shape)
      reverse_sequence_out = tf.reverse_sequence(input_t,
                                                 batch_dim=batch_dim,
                                                 seq_dim=seq_dim,
                                                 seq_lengths=seq_lengths_t)
      err = tf.test.compute_gradient_error(input_t,
                                           x.shape,
                                           reverse_sequence_out,
                                           x.shape,
                                           x_init_value=x)
    print("ReverseSequence gradient error = %g" % err)
    self.assertLess(err, 1e-8)
Author: peace195, Project: tensorflow, Lines: 27, Source: reverse_sequence_op_test.py


Example 11: step

  def step(self, time_, inputs, state, name=None):
    cell_output, cell_state = self.cell(inputs, state)
    cell_output_new, logits, attention_scores, attention_context = \
      self.compute_output(cell_output)

    if self.reverse_scores_lengths is not None:
      attention_scores = tf.reverse_sequence(
          input=attention_scores,
          seq_lengths=self.reverse_scores_lengths,
          seq_dim=1,
          batch_dim=0)

    sample_ids = self.helper.sample(
        time=time_, outputs=logits, state=cell_state)

    outputs = AttentionDecoderOutput(
        logits=logits,
        predicted_ids=sample_ids,
        cell_output=cell_output_new,
        attention_scores=attention_scores,
        attention_context=attention_context)

    finished, next_inputs, next_state = self.helper.next_inputs(
        time=time_, outputs=outputs, state=cell_state, sample_ids=sample_ids)

    return (outputs, next_state, next_inputs, finished)
Author: AbhinavJain13, Project: seq2seq, Lines: 26, Source: attention_decoder.py


Example 12: __init__

    def __init__(self, embedding=None, hidden_state_d=100, max_length=80, learning_rate=0.001, dropout_rate=0.5, vocab_size=400001, embedding_d=300, num_classes=2):
        self.data = tf.placeholder(dtype=tf.int32, shape=[None, max_length])
        self.len = tf.placeholder(dtype=tf.int32, shape=[None])
        self.label = tf.placeholder(dtype=tf.float32, shape=[None])

        self.neg_label = 1 - self.label

        self.co_label = tf.transpose(tf.reshape(tf.concat(0, [self.label, self.neg_label]), [2, -1]))

        self.init_embedding(embedding, vocab_size, embedding_d)

        # filter len to maxlength
        self.maxlen = tf.cast(tf.fill([tf.shape(self.len)[0]], max_length), tf.int64)
        self.filter = tf.less_equal(tf.cast(self.len, tf.int64), self.maxlen)
        self.clean_len = tf.select(self.filter, tf.cast(self.len, tf.int64), self.maxlen)

        self.vec_data = tf.nn.embedding_lookup(self.embedding, self.data)
        self.reversed_vec_data = tf.reverse_sequence(self.vec_data, seq_dim=1, seq_lengths=self.clean_len)

        with tf.variable_scope('left2right'):
            left2right_lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(hidden_state_d, state_is_tuple=True)
            self.output, self.state = tf.nn.dynamic_rnn(
                left2right_lstm_cell,
                self.vec_data,
                dtype=tf.float32,
                sequence_length=self.len,
            )

        with tf.variable_scope('right2left'):
            right2left_lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(hidden_state_d, state_is_tuple=True)
            self.reversed_output, self.reversed_state = tf.nn.dynamic_rnn(
                right2left_lstm_cell,
                self.reversed_vec_data,
                dtype=tf.float32,
                sequence_length=self.len,
            )

        self.last = BiLSTM.last_relevant(self.output, self.len)
        self.reversed_last = BiLSTM.last_relevant(self.reversed_output, self.len)

        self.final_output = tf.concat(1, [self.last, self.reversed_last])

        self.dropout_last = tf.nn.dropout(self.final_output, keep_prob=dropout_rate)

        self.weight = tf.Variable(tf.truncated_normal([hidden_state_d * 2, num_classes], stddev=0.1))
        self.bias = tf.Variable(tf.constant(0.1, shape=[num_classes]))
        self.prediction = tf.nn.softmax(tf.matmul(self.final_output, self.weight) + self.bias)

        self.cost = tf.nn.softmax_cross_entropy_with_logits(tf.matmul(self.dropout_last, self.weight) + self.bias, self.co_label)
        self.train_op = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(self.cost)
        self.init_op = tf.initialize_all_variables()

        self.prediction_a = tf.argmax(self.prediction, dimension=1)
        self.prediction_b = tf.argmax(self.co_label, dimension=1)

        self.score = tf.reduce_sum(tf.cast(tf.equal(self.prediction_a, self.prediction_b), dtype=tf.int32)) / tf.size(self.label)

        self.sess = tf.Session()
        self.sess.run(self.init_op)
Author: easonnie, Project: landOfflol, Lines: 59, Source: biLSTM.py


Example 13: _bidirectional_rnn

 def _bidirectional_rnn(self, data, length):
     length_64 = tf.cast(length, tf.int64)
     forward, _ = tf.nn.dynamic_rnn(
         cell=self.params.rnn_cell(self.params.rnn_hidden),
         inputs=data,
         dtype=tf.float32,
         sequence_length=length,
         scope='rnn-forward')
     backward, _ = tf.nn.dynamic_rnn(
         cell=self.params.rnn_cell(self.params.rnn_hidden),
         inputs=tf.reverse_sequence(data, length_64, seq_dim=1),
         dtype=tf.float32,
         sequence_length=self.length,
         scope='rnn-backward')
     backward = tf.reverse_sequence(backward, length_64, seq_dim=1)
     output = tf.concat(2, [forward, backward])
     return output
Author: Decalogue, Project: tensorflowbook, Lines: 17, Source: BidirectionalSequenceLabellingModel.py


Example 14: __init__

    def __init__(self, input_, length_, hidden_state_d, name, cell=None, input_keep_rate=1.0, output_keep_rate=1.0,
                 init_state=None):
        """
        lstm_step, input_d, hidden_state_d
        :param name:
        :return:
        self.input  (shape=[None, lstm_step, input_d], dtype=tf.float32, name='input')
        self.length (shape=[None], dtype=tf.int32, name='length')
        """
        with tf.variable_scope(name):
            self.input = input_
            self.length = length_

            self.reverse_input = tf.reverse_sequence(self.input, seq_dim=1, seq_lengths=tf.cast(self.length, tf.int64))

            if len(cell) > 1:
                cell_f, cell_r = cell
            elif len(cell) == 1:
                cell_f = cell[0]
                cell_r = cell[0]
            else:  # cell is None
                cell_f = tf.nn.rnn_cell.BasicLSTMCell(hidden_state_d, state_is_tuple=True)
                cell_r = tf.nn.rnn_cell.BasicLSTMCell(hidden_state_d, state_is_tuple=True)

            if not init_state:
                init_state_f = None
                init_state_b = None
            elif len(init_state) > 1:
                init_state_f = init_state[0]
                init_state_b = init_state[1]
            else:
                init_state_f = init_state[0]
                init_state_b = init_state[0]

            # print('blala', init_state_f)
            # print('blala', init_state_b)

            with tf.variable_scope('forward'):
                self.output, self.last_state = tf.nn.dynamic_rnn(
                    cell_f,
                    tf.nn.dropout(self.input, input_keep_rate),
                    dtype=tf.float32,
                    sequence_length=self.length,
                    initial_state=init_state_f
                )
                self.last = tf.nn.dropout(BasicSeqModel.last_relevant(self.output, self.length),
                                          output_keep_rate)

            with tf.variable_scope('backward'):
                self.reverse_output, self.reverse_last_state = tf.nn.dynamic_rnn(
                    cell_r,
                    tf.nn.dropout(self.reverse_input, input_keep_rate),
                    dtype=tf.float32,
                    sequence_length=self.length,
                    initial_state=init_state_b
                )
                self.reverse_last = tf.nn.dropout(BasicSeqModel.last_relevant(self.reverse_output, self.length),
                                                  output_keep_rate)
Author: BinbinBian, Project: expr_snli, Lines: 58, Source: base.py


Example 15: testShapeFunctionEdgeCases

  def testShapeFunctionEdgeCases(self):
    # Batch size mismatched between input and seq_lengths.
    with self.assertRaises(ValueError):
      tf.reverse_sequence(
          tf.placeholder(tf.float32, shape=(32, 2, 3)),
          seq_lengths=tf.placeholder(tf.int64, shape=(33,)),
          seq_dim=3)

    # seq_dim out of bounds.
    with self.assertRaisesRegexp(ValueError, "seq_dim must be < input.dims()"):
      tf.reverse_sequence(
          tf.placeholder(tf.float32, shape=(32, 2, 3)),
          seq_lengths=tf.placeholder(tf.int64, shape=(32,)),
          seq_dim=3)

    # batch_dim out of bounds.
    with self.assertRaisesRegexp(
        ValueError, "batch_dim must be < input.dims()"):
      tf.reverse_sequence(
          tf.placeholder(tf.float32, shape=(32, 2, 3)),
          seq_lengths=tf.placeholder(tf.int64, shape=(32,)),
          seq_dim=0,
          batch_dim=3)

    with self.test_session():
      inputs = tf.placeholder(tf.float32, shape=(32, 2, 3))
      seq_lengths = tf.placeholder(tf.int64, shape=(32,))
      output = tf.reverse_sequence(
          inputs,
          seq_lengths=seq_lengths,
          seq_dim=0)  # batch_dim default is 0
      with self.assertRaisesOpError("batch_dim == seq_dim"):
        output.eval(feed_dict={inputs: np.random.rand(32, 2, 3),
                               seq_lengths: xrange(32)})
Author: peace195, Project: tensorflow, Lines: 34, Source: reverse_sequence_op_test.py


Example 16: bidirectional_rnn

  def bidirectional_rnn(self, cell, inputs, lengths, scope=None):
    name = scope.name or "BiRNN"
    # Forward direction
    with vs.variable_scope(name + "_FW") as fw_scope:
      output_fw, output_state_fw = rnn.dynamic_rnn(cell, inputs, time_major=True, dtype=dtypes.float32,
                                                   sequence_length=lengths, scope=fw_scope)
    # Backward direction
    inputs_bw = tf.reverse_sequence(inputs, tf.to_int64(lengths), seq_dim=0, batch_dim=1)
    with vs.variable_scope(name + "_BW") as bw_scope:
      output_bw, output_state_bw = rnn.dynamic_rnn(cell, inputs_bw, time_major=True, dtype=dtypes.float32,
                                                   sequence_length=lengths, scope=bw_scope)

    output_bw = tf.reverse_sequence(output_bw, tf.to_int64(lengths), seq_dim=0, batch_dim=1)

    outputs = output_fw + output_bw
    output_state = output_state_fw + output_state_bw

    return (outputs, output_state)
Author: sdlg, Project: nlc, Lines: 18, Source: nlc_model.py


Example 17: encode

 def encode(self, features, labels):
   
   features["source_ids"] = tf.reverse_sequence(features["source_ids"], features["source_len"], batch_dim=0, seq_dim=1)  # [[1,2,3,4,PAD,PAD,PAD],[2,3,PAD,PAD,PAD,PAD,PAD]]   [4,2]
   features["source_ids"] = tf.reverse(features["source_ids"],[1])  # --> [[4,3,2,1,PAD,PAD,PAD],[3,2,PAD,PAD,PAD,PAD,PAD]] --> [[PAD,PAD,PAD,1,2,3,4],[PAD,PAD,PAD,PAD,PAD,2,3]]
    
   source_embedded = tf.nn.embedding_lookup(self.source_embedding_fairseq(),
                                            features["source_ids"])
   encoder_fn = self.encoder_class(self.params["encoder.params"], self.mode, self.source_pos_embedding_fairseq())
   return encoder_fn(source_embedded, features["source_len"])
Author: clren, Project: conv_seq2seq, Lines: 9, Source: conv_seq2seq.py


Example 18: reverse

 def reverse(self):
     x = tf.Variable(self.image, name='x')
     model = tf.initialize_all_variables()
     
     with tf.Session() as session:
         x = tf.reverse_sequence(x, [self.width] * self.height, 1, batch_dim=0)
         session.run(model)
         result = session.run(x)
     return result
Author: KellyChan, Project: python-examples, Lines: 9, Source: arrays.py


Example 19: bw_dynamic_rnn

def bw_dynamic_rnn(cell, inputs, sequence_length=None, initial_state=None,
                   dtype=None, parallel_iterations=None, swap_memory=False,
                   time_major=False, scope=None):
    assert not time_major  # TODO : to be implemented later!

    flat_inputs = flatten(inputs, 2)  # [-1, J, d]
    flat_len = None if sequence_length is None else tf.cast(flatten(sequence_length, 0), 'int64')

    flat_inputs = tf.reverse(flat_inputs, 1) if sequence_length is None \
        else tf.reverse_sequence(flat_inputs, sequence_length, 1)
    flat_outputs, final_state = _dynamic_rnn(cell, flat_inputs, sequence_length=flat_len,
                                             initial_state=initial_state, dtype=dtype,
                                             parallel_iterations=parallel_iterations, swap_memory=swap_memory,
                                             time_major=time_major, scope=scope)
    flat_outputs = tf.reverse(flat_outputs, 1) if sequence_length is None \
        else tf.reverse_sequence(flat_outputs, sequence_length, 1)

    outputs = reconstruct(flat_outputs, inputs, 2)
    return outputs, final_state
Author: codealphago, Project: convai-bot-1337, Lines: 19, Source: rnn.py


Example 20: dynamic_bidirectional_rnn

def dynamic_bidirectional_rnn(cell, pre_inputs, sequence_length=None, initial_state=None,
                              dtype=None, parallel_iterations=None, swap_memory=False,
                              time_major=False, scope=None, feed_prev_out=False,
                              num_layers=1, reuse_layers=True):
    isinstance(cell, BiRNNCell)
    with vs.variable_scope(scope or "Bi-RNN") as root_scope:
        inputs_list = []
        outputs_list = []
        outputs_fw_list = []
        outputs_bw_list = []
        state_fw_list = []
        state_bw_list = []
        for layer_idx in range(num_layers):
            scope_name = "layer_{}".format(layer_idx)
            with name_scope(scope_name) if reuse_layers else vs.variable_scope(scope_name):
                inputs = cell.pre(pre_inputs)
                outputs_fw, state_fw = dynamic_rnn(cell, inputs, sequence_length=sequence_length, initial_state=initial_state,
                    dtype=dtype, parallel_iterations=parallel_iterations, swap_memory=swap_memory,
                    time_major=time_major, feed_prev_out=feed_prev_out, scope='FW')
                inputs_rev = reverse_sequence(inputs, sequence_length, 1)
                outputs_bw_rev, state_bw = dynamic_rnn(cell, inputs_rev, sequence_length=sequence_length, initial_state=initial_state,
                    dtype=dtype, parallel_iterations=parallel_iterations, swap_memory=swap_memory,
                    time_major=time_major, feed_prev_out=feed_prev_out, scope='BW')
                outputs_bw = reverse_sequence(outputs_bw_rev, sequence_length, 1)
                outputs = cell.post(outputs_fw, outputs_bw)
                pre_inputs = outputs
                inputs_list.append(inputs)
                outputs_list.append(outputs)
                outputs_fw_list.append(outputs_fw)
                outputs_bw_list.append(outputs_bw)
                state_fw_list.append(state_fw)
                state_bw_list.append(state_bw)
                if reuse_layers:
                    root_scope.reuse_variables()
        tensors = dict()
        tensors['in'] = transpose(pack(inputs_list), [1, 0, 2, 3])
        tensors['out'] = transpose(pack(outputs_list), [1, 0, 2, 3])
        tensors['fw_out'] = transpose(pack(outputs_fw_list), [1, 0, 2, 3])  # [N, L, M, d]
        tensors['bw_out'] = transpose(pack(outputs_bw_list), [1, 0, 2, 3])  # [N, L, M, d]
        tensors['fw_state'] = transpose(pack(state_fw_list), [1, 0, 2])  # [N, L, d]
        tensors['bw_state'] = transpose(pack(state_bw_list), [1, 0, 2])  # [N, L, d]
    return outputs_list[-1], state_fw_list[-1], state_bw_list[-1], tensors
Author: eunchung, Project: qrn, Lines: 42, Source: rnn.py



Note: The tensorflow.reverse_sequence examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are taken from open-source projects contributed by many developers; copyright remains with the original authors. Please consult each project's license before redistributing or using the code, and do not repost this article without permission.

