
Python tensorflow.unstack Function Code Examples


This article collects typical usage examples of the tensorflow.unstack function in Python. If you are wondering what unstack does, how it is called, or what real-world usage looks like, the curated examples below should help.



Twenty code examples of the unstack function are presented below, ordered by popularity by default.
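Before diving into the examples, a minimal sketch of what tf.unstack does may help (written against the TensorFlow 1.x API that all of the examples below use): it splits a rank-R tensor along one axis into a Python list of rank R-1 tensors.

import tensorflow as tf

# A 2x3 constant unstacked along each axis (values are illustrative).
t = tf.constant([[1, 2, 3],
                 [4, 5, 6]])
rows = tf.unstack(t, axis=0)  # list of 2 tensors, each of shape (3,)
cols = tf.unstack(t, axis=1)  # list of 3 tensors, each of shape (2,)

with tf.Session() as sess:
    print(sess.run(rows))  # [array([1, 2, 3]), array([4, 5, 6])]
    print(sess.run(cols))  # [array([1, 4]), array([2, 5]), array([3, 6])]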

Example 1: _init_decoder_train_connectors

  def _init_decoder_train_connectors(self):
    with tf.name_scope('DecoderTrainFeeds'):
      sequence_size, batch_size = tf.unstack(tf.shape(self.decoder_targets))
      self.EOS_SLICE = tf.ones([1, batch_size], dtype=tf.int32) * self.EOS
      self.PAD_SLICE = tf.ones([1, batch_size], dtype=tf.int32) * self.PAD

      self.decoder_train_inputs = tf.concat([self.EOS_SLICE, self.decoder_targets], axis=0)
      self.decoder_train_length = self.decoder_targets_length + 1

      decoder_train_targets = tf.concat([self.decoder_targets, self.PAD_SLICE], axis=0)
      self.decoder_train_targets_seq_len, _ = tf.unstack(tf.shape(decoder_train_targets))
      decoder_train_targets_eos_mask = tf.one_hot(self.decoder_train_length - 1,
                                                  self.decoder_train_targets_seq_len,
                                                  on_value=self.EOS, off_value=self.PAD,
                                                  dtype=tf.int32)
      self.decoder_train_targets_eos_mask = tf.transpose(decoder_train_targets_eos_mask, [1, 0])
      
      self.temp_decoder_train_targets = decoder_train_targets
      # hacky way using one_hot to put EOS symbol at the end of target sequence
      decoder_train_targets = tf.add(decoder_train_targets,
                                     self.decoder_train_targets_eos_mask)

      self.decoder_train_targets = decoder_train_targets

      self.loss_weights = tf.ones([
          batch_size,
          tf.reduce_max(self.decoder_train_length)
      ], dtype=tf.float32, name="loss_weights")
Author: wujsAct | Project: TeachingMachineReadAndComprehend | Lines: 28 | Source: layers.py
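The "hacky" one_hot trick in the code above deserves a word: because the appended slice and every position at or beyond a sequence's length hold PAD, adding a one_hot row whose hot value is EOS overwrites exactly the first PAD slot. A minimal sketch with hypothetical values (EOS=1, PAD=0, time-major targets):

import tensorflow as tf

EOS, PAD = 1, 0
targets = tf.constant([[4, 7],
                       [5, PAD],
                       [6, PAD]])              # (seq_len=3, batch=2), time-major
train_length = tf.constant([4, 2])             # per-item target length + 1
padded = tf.concat([targets, tf.fill([1, 2], PAD)], axis=0)         # (4, 2)
eos_mask = tf.one_hot(train_length - 1, 4,
                      on_value=EOS, off_value=PAD, dtype=tf.int32)  # (2, 4)
train_targets = padded + tf.transpose(eos_mask)  # EOS lands at row length-1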


Example 2: buildModel

  def buildModel(self, lstm_layer, is_dynamic_rnn, is_train):
    # Weights and biases for output softmax layer.
    out_weights = tf.Variable(
        tf.random_normal([self.num_units, self.n_classes]))
    out_bias = tf.Variable(tf.random_normal([self.n_classes]))

    # input image placeholder
    x = tf.placeholder(
        "float", [None, self.time_steps, self.n_input], name="INPUT_IMAGE")

    # For dynamic_rnn, train with dynamic_rnn and inference with static_rnn.
    # x is shaped [batch_size,time_steps,num_inputs]
    if is_dynamic_rnn:
      if is_train:
        lstm_input = x
        outputs, _ = tf.nn.dynamic_rnn(lstm_layer, lstm_input, dtype="float32")
        outputs = tf.unstack(outputs, axis=1)
      else:
        lstm_input = tf.unstack(x, self.time_steps, 1)
        outputs, _ = tf.nn.static_rnn(lstm_layer, lstm_input, dtype="float32")
    else:
      lstm_input = tf.unstack(x, self.time_steps, 1)
      outputs, _ = tf.nn.static_rnn(lstm_layer, lstm_input, dtype="float32")

    # Compute logits by multiplying outputs[-1] of shape [batch_size,num_units]
    # by the softmax layer's out_weight of shape [num_units,n_classes]
    # plus out_bias
    prediction = tf.matmul(outputs[-1], out_weights) + out_bias
    output_class = tf.nn.softmax(prediction, name="OUTPUT_CLASS")

    return x, prediction, output_class
Author: JonathanRaiman | Project: tensorflow | Lines: 31 | Source: unidirectional_sequence_lstm_test.py


Example 3: merge

 def merge(inputs, targets):
   """Split inputs and targets into lists."""
   inputs = tf.unstack(inputs, axis=1)
   targets = tf.unstack(targets, axis=1)
   assert len(inputs) == hparams.video_num_input_frames
   assert len(targets) == hparams.video_num_target_frames
   return inputs + targets
Author: qixiuai | Project: tensor2tensor | Lines: 7 | Source: base.py


Example 4: gather_neighbors

def gather_neighbors(X, nbr_indices, B, N, M, d):
  """Gathers the neighbor subsets of the atoms in X.

  B = batch_size, N = max_num_atoms, M = max_num_neighbors, d = num_features

  Parameters
  ----------
  X: tf.Tensor of shape (B, N, d)
    Coordinates/features tensor.
  nbr_indices: tf.Tensor of shape (B, M)
    Neighbor list for single atom.

  Returns
  -------
  neighbors: tf.Tensor of shape (B, M, d)
    Neighbor coordinates/features tensor for single atom.

  """

  example_tensors = tf.unstack(X, axis=0)
  example_nbrs = tf.unstack(nbr_indices, axis=0)
  all_nbr_coords = []
  for example, (example_tensor, example_nbr) in enumerate(
      zip(example_tensors, example_nbrs)):
    nbr_coords = tf.gather(example_tensor, example_nbr)
    all_nbr_coords.append(nbr_coords)
  neighbors = tf.stack(all_nbr_coords)
  return neighbors
Author: ktaneishi | Project: deepchem | Lines: 28 | Source: atomicnet_ops.py
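The unstack-and-loop pattern above is readable but builds B separate gather ops. Newer TensorFlow releases (1.14 and later, to the best of my knowledge) can do the same in one call through tf.gather's batch_dims argument; a sketch under that assumption:

import tensorflow as tf

def gather_neighbors_vectorized(X, nbr_indices):
  """Same contract as gather_neighbors: (B, N, d) and (B, M) -> (B, M, d)."""
  # batch_dims=1 gathers row b of nbr_indices from slice b of X.
  return tf.gather(X, nbr_indices, batch_dims=1)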


Example 5: __call__

    def __call__(self, inputs, seq_len, keep_prob=1.0, is_train=None, concat_layers=True):
        outputs = [tf.transpose(inputs, [1, 0, 2])]
        for layer in range(self.num_layers):
            gru_fw, gru_bw = self.grus[layer]
            init_fw, init_bw = self.inits[layer]
            mask_fw, mask_bw = self.dropout_mask[layer]
            with tf.variable_scope('fw_{}'.format(layer), reuse=tf.AUTO_REUSE):
                with tf.variable_scope('cudnn_gru', reuse=tf.AUTO_REUSE):
                    out_fw, _ = tf.nn.dynamic_rnn(cell=gru_fw, inputs=outputs[-1] * mask_fw, time_major=True,
                                                  initial_state=tuple(tf.unstack(init_fw, axis=0)))

            with tf.variable_scope('bw_{}'.format(layer), reuse=tf.AUTO_REUSE):
                with tf.variable_scope('cudnn_gru', reuse=tf.AUTO_REUSE):
                    inputs_bw = tf.reverse_sequence(
                        outputs[-1] * mask_bw, seq_lengths=seq_len, seq_dim=0, batch_dim=1)
                    out_bw, _ = tf.nn.dynamic_rnn(cell=gru_bw, inputs=inputs_bw, time_major=True,
                                                  initial_state=tuple(tf.unstack(init_bw, axis=0)))
                    out_bw = tf.reverse_sequence(
                        out_bw, seq_lengths=seq_len, seq_dim=0, batch_dim=1)

            outputs.append(tf.concat([out_fw, out_bw], axis=2))
        if concat_layers:
            res = tf.concat(outputs[1:], axis=2)
        else:
            res = outputs[-1]
        res = tf.transpose(res, [1, 0, 2])
        return res
Author: RileyShe | Project: DeepPavlov | Lines: 27 | Source: utils.py


Example 6: _decode_and_random_crop

def _decode_and_random_crop(image_buffer, bbox, image_size):
  """Randomly crops image and then scales to target size."""
  with tf.name_scope('distorted_bounding_box_crop',
                     values=[image_buffer, bbox]):
    sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
        tf.image.extract_jpeg_shape(image_buffer),
        bounding_boxes=bbox,
        min_object_covered=0.1,
        aspect_ratio_range=[0.75, 1.33],
        area_range=[0.08, 1.0],
        max_attempts=10,
        use_image_if_no_bounding_boxes=True)
    bbox_begin, bbox_size, _ = sample_distorted_bounding_box

    # Crop the image to the specified bounding box.
    offset_y, offset_x, _ = tf.unstack(bbox_begin)
    target_height, target_width, _ = tf.unstack(bbox_size)
    crop_window = tf.stack([offset_y, offset_x, target_height, target_width])
    image = tf.image.decode_and_crop_jpeg(image_buffer, crop_window, channels=3)
    image = tf.image.convert_image_dtype(
        image, dtype=tf.float32)

    image = tf.image.resize_bicubic([image],
                                    [image_size, image_size])[0]

    return image
Author: 812864539 | Project: models | Lines: 26 | Source: imagenet_input.py


Example 7: vec2mtrxBatch

def vec2mtrxBatch(pBatch,opt):
	with tf.name_scope("vec2mtrx"):
		batchSize = tf.shape(pBatch)[0]
		O = tf.zeros([batchSize])
		I = tf.ones([batchSize])
		if opt.warpType=="translation":
			tx,ty = tf.unstack(pBatch,axis=1)
			pMtrxBatch = tf.transpose(tf.stack([[I,O,tx],
												[O,I,ty],
												[O,O,I]]),perm=[2,0,1])
		elif opt.warpType=="similarity":
			pc,ps,tx,ty = tf.unstack(pBatch,axis=1)
			pMtrxBatch = tf.transpose(tf.stack([[I+pc,-ps,tx],
												[ps,I+pc,ty],
												[O,O,I]]),perm=[2,0,1])
		elif opt.warpType=="affine":
			p1,p2,p3,p4,p5,p6 = tf.unstack(pBatch,axis=1)
			pMtrxBatch = tf.transpose(tf.stack([[I+p1,p2,p3],
												[p4,I+p5,p6],
												[O,O,I]]),perm=[2,0,1])
		elif opt.warpType=="homography":
			p1,p2,p3,p4,p5,p6,p7,p8 = tf.unstack(pBatch,axis=1)
			pMtrxBatch = tf.transpose(tf.stack([[I+p1,p2,p3],
												[p4,I+p5,p6],
												[p7,p8,I]]),perm=[2,0,1])
	return pMtrxBatch
Author: sunshinezhe | Project: IC-STN | Lines: 26 | Source: warp.py
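As a concrete reading of the translation branch, each parameter vector p = (tx, ty) becomes a 3x3 homogeneous matrix with an identity upper-left block; a hypothetical call (with opt.warpType == "translation"):

pBatch = tf.constant([[1., 2.],
                      [3., 4.]])         # a batch of two (tx, ty) vectors
pMtrxBatch = vec2mtrxBatch(pBatch, opt)  # shape (2, 3, 3)
# pMtrxBatch[0] == [[1., 0., 1.],
#                   [0., 1., 2.],
#                   [0., 0., 1.]]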


Example 8: buildModel

  def buildModel(self, fw_lstm_layer, bw_lstm_layer, is_dynamic_rnn):
    # Weights and biases for output softmax layer.
    out_weights = tf.Variable(
        tf.random_normal([self.num_units * 2, self.n_classes]))
    out_bias = tf.Variable(tf.random_normal([self.n_classes]))

    # input image placeholder
    x = tf.placeholder(
        "float", [None, self.time_steps, self.n_input], name="INPUT_IMAGE")

    if is_dynamic_rnn:
      lstm_inputs = tf.transpose(x, [1, 0, 2])
      outputs, _ = bidirectional_dynamic_rnn(
          fw_lstm_layer,
          bw_lstm_layer,
          lstm_inputs,
          dtype="float32",
          time_major=True)
      fw_outputs, bw_outputs = outputs
      output = tf.concat([fw_outputs, bw_outputs], 2)
      output = tf.unstack(output, axis=0)
      output = output[-1]
    else:
      lstm_input = tf.unstack(x, self.time_steps, 1)
      outputs, _, _ = tf.nn.static_bidirectional_rnn(
          fw_lstm_layer, bw_lstm_layer, lstm_input, dtype="float32")
      output = outputs[-1]

    # Compute logits by multiplying output of shape [batch_size,num_units*2]
    # by the softmax layer's out_weight of shape [num_units*2,n_classes]
    # plus out_bias
    prediction = tf.matmul(output, out_weights) + out_bias
    output_class = tf.nn.softmax(prediction, name="OUTPUT_CLASS")

    return x, prediction, output_class
Author: kylin9872 | Project: tensorflow | Lines: 35 | Source: bidirectional_sequence_lstm_test.py


Example 9: lstm_word_embedding_from_chars

def lstm_word_embedding_from_chars(chars, lengths, embed_dim, scope='lstm-word-embed', reuse=False):
    """
    Word embeddings via LSTM encoding of character sequences.

    Args:
        chars: Tensor of shape [batch_size, word sequence length, char sequence length, num characters].
        lengths: Tensor of shape [batch_size, word_sequence length].
        embed_dim: Dimension of word embeddings.  Integer.

    Returns:
        Sequence of embedding vectors.  Tensor of shape [batch_size, word sequence length, embed_dim].

    """
    chars = tf.cast(chars, tf.float32)

    # this is super inefficient
    chars = tf.unstack(chars, axis=0)
    lengths = tf.unstack(lengths, axis=0)

    lstm_word_embeddings = []
    for i, (char, length) in enumerate(zip(chars, lengths)):
        temp_reuse = i != 0 or reuse
        embedding = lstm_encoder(char, length, embed_dim, 1.0, scope=scope, reuse=temp_reuse)
        lstm_word_embeddings.append(embedding)
    lstm_word_embeddings = tf.stack(lstm_word_embeddings, axis=0)

    return lstm_word_embeddings
Author: charlesjansen | Project: quora-duplicate-questions | Lines: 27 | Source: embed.py
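The comment above admits the per-example loop is "super inefficient". If lstm_encoder treats its leading dimension as an opaque batch (an assumption about that helper, whose code is not shown here), the loop can be collapsed by folding the word dimension into the batch dimension:

# Sketch: encode all words of all examples in one lstm_encoder call.
shape = tf.shape(chars)                  # (batch, words, chars, num_chars)
flat_chars = tf.reshape(chars, [shape[0] * shape[1], shape[2], shape[3]])
flat_lengths = tf.reshape(lengths, [-1])
flat_embed = lstm_encoder(flat_chars, flat_lengths, embed_dim, 1.0,
                          scope=scope, reuse=reuse)
embeddings = tf.reshape(flat_embed, [shape[0], shape[1], embed_dim])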


Example 10: input_fn

def input_fn(data_dir, subset, num_shards, batch_size):
    """Create input graph for model.

    Args:
        data_dir: Directory where TFRecords representing the dataset are located.
        subset: one of 'train', 'validate' and 'eval'.
        num_shards: num of towers participating in data-parallel training.
        batch_size: total batch size for training to be divided by the number of
        shards.
    Returns:
        two lists of tensors for features and labels, each of num_shards length.
    """
    with tf.device('/cpu:0'):
        dataset = mlp_data.MlpDataSet(data_dir, subset)
        image_batch, label_batch = dataset.make_batch(batch_size)
        if num_shards <= 1:
            # No GPU available or only 1 GPU.
            return [image_batch], [label_batch]

        # Note that passing num=batch_size is safe here, even though
        # dataset.batch(batch_size) can, in some cases, return fewer than batch_size
        # examples. This is because it does so only when repeating for a limited
        # number of epochs, but our dataset repeats forever.
        image_batch = tf.unstack(image_batch, num=batch_size, axis=0)
        label_batch = tf.unstack(label_batch, num=batch_size, axis=0)
        feature_shards = [[] for i in range(num_shards)]
        label_shards = [[] for i in range(num_shards)]
        for i in range(batch_size):
            idx = i % num_shards
            feature_shards[idx].append(image_batch[i])
            label_shards[idx].append(label_batch[i])
        feature_shards = [tf.parallel_stack(x) for x in feature_shards]
        label_shards = [tf.parallel_stack(x) for x in label_shards]
        return feature_shards, label_shards
Author: GeoffGao | Project: apollo | Lines: 34 | Source: mlp_main.py


Example 11: wasserstein_disagreement_map

def wasserstein_disagreement_map(
        prediction, ground_truth, weight_map=None, M=None):
    """
    Function to calculate the pixel-wise Wasserstein distance between the
    flattened prediction and the flattened labels (ground_truth) with respect
    to the distance matrix on the label space M.

    :param prediction: the logits after softmax
    :param ground_truth: segmentation ground_truth
    :param M: distance matrix on the label space
    :return: the pixelwise distance map (wass_dis_map)
    """
    if weight_map is not None:
        # raise NotImplementedError
        tf.logging.warning('Weight map specified but not used.')

    assert M is not None, "Distance matrix is required."
    # pixel-wise Wasserstein distance (W) between flat_pred_proba and flat_labels
    # wrt the distance matrix on the label space M
    n_classes = prediction.shape[1].value
    unstack_labels = tf.unstack(ground_truth, axis=-1)
    unstack_labels = tf.cast(unstack_labels, dtype=tf.float64)
    unstack_pred = tf.unstack(prediction, axis=-1)
    unstack_pred = tf.cast(unstack_pred, dtype=tf.float64)
    # print("shape of M", M.shape, "unstacked labels", unstack_labels,
    #       "unstacked pred" ,unstack_pred)
    # W is a weighting sum of all pairwise correlations (pred_ci x labels_cj)
    pairwise_correlations = []
    for i in range(n_classes):
        for j in range(n_classes):
            pairwise_correlations.append(
                M[i, j] * tf.multiply(unstack_pred[i], unstack_labels[j]))
    wass_dis_map = tf.add_n(pairwise_correlations)
    return wass_dis_map
Author: nhu2000 | Project: NiftyNet | Lines: 34 | Source: loss_segmentation.py
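Written out, the double loop above accumulates the expected ground distance between the predicted and reference label distributions at each pixel $x$:

$$W(x) = \sum_{i=1}^{C}\sum_{j=1}^{C} M_{ij}\, p_i(x)\, g_j(x),$$

where $p$ is the softmax prediction, $g$ the one-hot ground truth, and $C$ the number of classes (n_classes above).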


Example 12: hard_negative_mining

      def hard_negative_mining():
        bboxes_per_batch = tf.unstack(bboxes)
        classification_loss_per_batch = tf.unstack(classification_loss)
        num_positives_per_batch = tf.unstack(tf.reduce_sum(positives, axis=-1))
        neg_class_loss_per_batch = tf.unstack(neg_class_loss_all)

        neg_class_losses = []
        total_negatives = []

        for bboxes_per_image, classification_loss_per_image, num_positives_per_image, neg_class_loss_per_image in \
            zip(bboxes_per_batch, classification_loss_per_batch, num_positives_per_batch, neg_class_loss_per_batch):
          min_negatives_keep = tf.maximum(self.neg_pos_ratio * num_positives_per_image, 3)
          num_negatives_keep = tf.minimum(min_negatives_keep,
                                          tf.count_nonzero(neg_class_loss_per_image, dtype=tf.float32))

          indices = tf.image.non_max_suppression(bboxes_per_image, classification_loss_per_image,
                                                 tf.to_int32(num_negatives_keep), iou_threshold=0.99)
          num_negatives = tf.size(indices)
          total_negatives.append(num_negatives)
          expanded_indexes = tf.expand_dims(indices, axis=1)  # shape: (num_negatives, 1)
          negatives_keep = tf.scatter_nd(expanded_indexes, updates=tf.ones_like(indices, dtype=tf.int32),
                                         shape=tf.shape(classification_loss_per_image))  # shape: (num_priors,)
          negatives_keep = tf.to_float(tf.reshape(negatives_keep, [num_priors]))  # shape: (num_priors,)
          neg_class_losses.append(tf.reduce_sum(classification_loss_per_image * negatives_keep, axis=-1))  # shape: (1,)

        return tf.stack(neg_class_losses), tf.reduce_sum(tf.stack(total_negatives))
Author: undeadinu | Project: training_toolbox_tensorflow | Lines: 26 | Source: loss.py


Example 13: testCannotInferNumFromNoneShape

 def testCannotInferNumFromNoneShape(self):
   x = tf.placeholder(np.float32, shape=(None,))
   with self.assertRaisesRegexp(ValueError,
                                r'Cannot infer num from shape \(\?,\)'):
     tf.unpack(x)
   with self.assertRaisesRegexp(ValueError,
                                r'Cannot infer num from shape \(\?,\)'):
     tf.unstack(x)
Author: ComeOnGetMe | Project: tensorflow | Lines: 8 | Source: unpack_op_test.py


Example 14: testCannotInferNumFromUnknownShape

 def testCannotInferNumFromUnknownShape(self):
   x = tf.placeholder(np.float32)
   with self.assertRaisesRegexp(
       ValueError, r'Cannot infer num from shape <unknown>'):
     tf.unpack(x)
   with self.assertRaisesRegexp(
       ValueError, r'Cannot infer num from shape <unknown>'):
     tf.unstack(x)
Author: ComeOnGetMe | Project: tensorflow | Lines: 8 | Source: unpack_op_test.py
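Both tests pin down the same rule: unstack must know the length of the target axis when the graph is built. When the static shape leaves it unknown, passing num explicitly is the standard way out; a minimal sketch:

import numpy as np
import tensorflow as tf

x = tf.placeholder(np.float32, shape=(None,))
# tf.unstack(x) would raise: Cannot infer num from shape (?,)
pieces = tf.unstack(x, num=3)  # the caller promises the axis has length 3
assert len(pieces) == 3        # any feed for x must then have length 3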


Example 15: sample

  def sample(self, b_enc, b_dec):
    """Generate samples for the batch from the NADE.

    Args:
      b_enc: External encoder bias terms (`b` in [1]), sized
          `[batch_size, num_hidden]`.
      b_dec: External decoder bias terms (`c` in [1]), sized
          `[batch_size, num_dims]`.

    Returns:
      sample: The generated samples, sized `[batch_size, num_dims]`.
      log_prob: The log probabilities of each observation in the batch, sized
          `[batch_size, 1]`.
    """
    batch_size = tf.shape(b_enc)[0]

    a_0 = b_enc
    sample_0 = []
    log_p_0 = tf.zeros([batch_size, 1])

    w_enc_arr = tf.unstack(self.w_enc)
    w_dec_arr = tf.unstack(self.w_dec_t)
    b_dec_arr = tf.unstack(
        tf.reshape(tf.transpose(b_dec), [self.num_dims, batch_size, 1]))

    def loop_body(i, a, sample, log_p):
      """Accumulate hidden state, sample, and log probability for index i."""
      # Get weights and bias for time step.
      w_enc_i = w_enc_arr[i]
      w_dec_i = w_dec_arr[i]
      b_dec_i = b_dec_arr[i]

      cond_p_i = self._cond_prob(a, w_dec_i, b_dec_i)

      bernoulli = tf.contrib.distributions.Bernoulli(probs=cond_p_i,
                                                     dtype=tf.float32)
      v_i = bernoulli.sample()

      # Accumulate sampled values.
      sample_new = sample + [v_i]

      # Get log probability for this value. Log space avoids numerical issues.
      log_p_i = v_i * safe_log(cond_p_i) + (1 - v_i) * safe_log(1 - cond_p_i)

      # Accumulate log probability.
      log_p_new = log_p + log_p_i

      # Encode value and add to hidden units.
      a_new = a + tf.matmul(v_i, w_enc_i)

      return a_new, sample_new, log_p_new

    # Build the actual loop.
    a, sample, log_p = a_0, sample_0, log_p_0
    for i in range(self.num_dims):
      a, sample, log_p = loop_body(i, a, sample, log_p)

    return tf.transpose(tf.squeeze(tf.stack(sample), [2])), log_p
Author: eraoul | Project: magenta | Lines: 58 | Source: pianoroll_rnn_nade_graph.py


Example 16: buildModel

  def buildModel(self,
                 fw_rnn_layer,
                 bw_rnn_layer,
                 is_dynamic_rnn,
                 is_inference,
                 use_sequence_length=False):
    # Weights and biases for output softmax layer.
    out_weights = tf.Variable(
        tf.random_normal([self.num_units * 2, self.n_classes]))
    out_bias = tf.Variable(tf.random_normal([self.n_classes]))

    batch_size = self.batch_size
    if is_inference:
      batch_size = 1
    # input image placeholder
    x = tf.placeholder(
        "float", [batch_size, self.time_steps, self.n_input],
        name="INPUT_IMAGE")

    sequence_length = None
    if use_sequence_length:
      sequence_length = [self.time_steps] * batch_size
    if is_dynamic_rnn:
      rnn_inputs = tf.transpose(x, [1, 0, 2])
      outputs, _ = bidirectional_dynamic_rnn(
          fw_rnn_layer,
          bw_rnn_layer,
          rnn_inputs,
          sequence_length,
          dtype="float32",
          time_major=True)
      fw_outputs, bw_outputs = outputs
      output = tf.concat([fw_outputs, bw_outputs], 2)
      output = tf.unstack(output, axis=0)
      output = output[-1]
    else:
      rnn_inputs = tf.unstack(x, self.time_steps, 1)
      # Sequence length is not supported for static since we don't have a
      # wrapper for it. At training phase, we can still have sequence_length,
      # but inference phase, we change it to None.
      if is_inference:
        sequence_length = None
      outputs, _, _ = tf.nn.static_bidirectional_rnn(
          fw_rnn_layer,
          bw_rnn_layer,
          rnn_inputs,
          dtype="float32",
          sequence_length=sequence_length)
      output = outputs[-1]

    # Compute logits by multiplying output of shape [batch_size,num_units*2]
    # by the softmax layer's out_weight of shape [num_units*2,n_classes]
    # plus out_bias
    prediction = tf.matmul(output, out_weights) + out_bias
    output_class = tf.nn.softmax(prediction, name="OUTPUT_CLASS")

    return x, prediction, output_class
Author: kylin9872 | Project: tensorflow | Lines: 57 | Source: bidirectional_sequence_rnn_test.py


Example 17: construct_model

  def construct_model(self, images, actions, rewards):
    images = tf.unstack(images, axis=0)
    actions = tf.unstack(actions, axis=0)
    rewards = tf.unstack(rewards, axis=0)

    batch_size = common_layers.shape_list(images[0])[0]
    context_frames = self.hparams.video_num_input_frames

    # Predicted images and rewards.
    gen_rewards, gen_images, latent_means, latent_stds = [], [], [], []

    # LSTM states.
    lstm_state = [None] * 7

    # Create scheduled sampling function
    ss_func = self.get_scheduled_sample_func(batch_size)

    pred_image = tf.zeros_like(images[0])
    pred_reward = tf.zeros_like(rewards[0])
    latent = None
    for timestep, image, action, reward in zip(
        range(len(images)-1), images[:-1], actions[:-1], rewards[:-1]):
      # Scheduled Sampling
      done_warm_start = timestep > context_frames - 1
      groundtruth_items = [image, reward]
      generated_items = [pred_image, pred_reward]
      input_image, input_reward = self.get_scheduled_sample_inputs(
          done_warm_start, groundtruth_items, generated_items, ss_func)

      # Latent
      # TODO(mbz): should we use input_image instead of image?
      latent_images = tf.stack([image, images[timestep+1]], axis=0)
      latent_mean, latent_std = self.construct_latent_tower(
          latent_images, time_axis=0)
      latent = common_video.get_gaussian_tensor(latent_mean, latent_std)
      latent_means.append(latent_mean)
      latent_stds.append(latent_std)

      # Prediction
      pred_image, lstm_state, _ = self.construct_predictive_tower(
          input_image, input_reward, action, lstm_state, latent)

      if self.hparams.reward_prediction:
        pred_reward = self.reward_prediction(
            pred_image, input_reward, action, latent)
        pred_reward = common_video.decode_to_shape(
            pred_reward, common_layers.shape_list(input_reward), "reward_dec")
      else:
        pred_reward = input_reward

      gen_images.append(pred_image)
      gen_rewards.append(pred_reward)

    gen_images = tf.stack(gen_images, axis=0)
    gen_rewards = tf.stack(gen_rewards, axis=0)

    return gen_images, gen_rewards, latent_means, latent_stds
Author: qixiuai | Project: tensor2tensor | Lines: 57 | Source: sv2p.py


Example 18: batch_transform_cloud_xyz

def batch_transform_cloud_xyz(cloud, transform):
  results = []
  cloud_items = tf.unstack(cloud)
  if len(transform.shape.as_list()) == 2:
    transform_items = tf.unstack(transform)
  else:
    transform_items = [transform] * len(cloud_items)
  for cloud_item, transform_item in zip(cloud_items, transform_items):
    results.append(transform_cloud_xyz(cloud_item, transform_item))
  return tf.stack(results)
Author: ALISCIFP | Project: models | Lines: 10 | Source: icp_util.py
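The same zip-or-broadcast behavior can also be written without a Python-level unstack loop using tf.map_fn; a sketch, under the assumption that transform_cloud_xyz maps one (cloud, transform) pair and returns float32:

def batch_transform_cloud_xyz_v2(cloud, transform):
  if len(transform.shape.as_list()) == 2:
    # One transform per cloud: map over the pairs in lockstep.
    return tf.map_fn(lambda args: transform_cloud_xyz(args[0], args[1]),
                     (cloud, transform), dtype=tf.float32)
  # A single transform shared by the whole batch: close over it.
  return tf.map_fn(lambda c: transform_cloud_xyz(c, transform), cloud)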


Example 19: regressAnchors

def regressAnchors(anchors, bbox_regression, axis=-1):
    """ Given preliminary bounding boxes, regress them to their final location

    The bounding box regressions are outputs of convolutional layers and of
    the form (dx,dy,dw,dh), where dw and dh are the natural logarithms of
    the desired changes in width and height and where dx and dy are unscaled
    x and y displacements; they must be multiplied by the width and height of a given
    bounding box to arrive at the actual displacements.
    """
    # Our shifted anchors come in the shape (numBaseAnchors, feat_h, feat_w, 4)
    # We want to separate the 4 regression variables, {dx,dy,dw,dh}, out into their
    # own fifth dimension for the output of the regression as well!

    # (Actually, we're going to assume that the regressions are ALSO in the form
    # (numBaseAnchors, feat_h, feat_w, 4)!  This can be enforced at another stage.)

    with tf.device("/cpu:0"):
        x1, y1, x2, y2 = tf.unstack(anchors, num=4, axis=axis)
        dx, dy, dw, dh = tf.unstack(bbox_regression, num=4, axis=axis)

        # We must get the anchors into the same width/height x/y format as the
        # bbox_regressions
        w = x2 - x1 + [1.]
        h = y2 - y1 + [1.]
        x = w / [2.] + x1
        y = h / [2.] + y1

        # The dx and dy given by the regression must be scaled by w and h and added
        # to the anchors
        x_new = dx * w + x
        y_new = dy * h + y

        # Since logarithms of the values in question are easier to learn (no regression means
        # a logarithm of the change being zero), we learn the logarithms of h, w.
        w_new = tf.exp(dw) * w
        h_new = tf.exp(dh) * h

        # Transform back to the original (x1,y1,x2,y2) coordinate system
        x1_final = x_new - [.5] * w_new
        y1_final = y_new - [.5] * h_new

        # x2, y2 represent bottom-left corner of top-right pixels.  Hence we need to
        # subtract one, or else calling regressAnchors with trivial regressions augments
        # x2 and y2 by one every single time it is called.
        x2_final = x_new + [.5] * w_new - [1.]
        y2_final = y_new + [.5] * h_new - [1.]

        # Stack our anchors back up
        regressedAnchors = tf.stack([x1_final, y1_final, x2_final, y2_final], axis,
                name="regressed_anchors")

    # The output shape is the same as the input shape;  Output shape is
    # regressedAnchors.shape = (numBaseAnchors, feature_h, feature_w, 4)
    return regressedAnchors
Author: PentaHiggs | Project: fantastic-pancakes | Lines: 54 | Source: rpn.py
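One property worth noting, implied by the +1/-1 bookkeeping in the comments: an all-zero regression is exactly the identity (w_new = w, so x1_final = x - w/2 = x1, and likewise for the other coordinates). A quick hypothetical sanity check:

anchors = tf.constant([[[[10., 20., 30., 40.]]]])  # shape (1, 1, 1, 4)
identity = regressAnchors(anchors, tf.zeros_like(anchors))

with tf.Session() as sess:
    print(sess.run(identity))  # [[[[10. 20. 30. 40.]]]]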


Example 20: _add_seq2seq

  def _add_seq2seq(self):
    """Add the whole sequence-to-sequence model to the graph."""
    hps = self._hps
    vsize = self._vocab.size() # size of the vocabulary

    with tf.variable_scope('seq2seq'):
      # Some initializers
      self.rand_unif_init = tf.random_uniform_initializer(-hps.rand_unif_init_mag, hps.rand_unif_init_mag, seed=123)
      self.trunc_norm_init = tf.truncated_normal_initializer(stddev=hps.trunc_norm_init_std)

      # Add embedding matrix (shared by the encoder and decoder inputs)
      with tf.variable_scope('embedding'):
        if FLAGS.embedding:
          embedding = tf.Variable(self.embedding_place)
        else:
          embedding = tf.get_variable('embedding', [vsize, hps.emb_dim], dtype=tf.float32, initializer=self.trunc_norm_init)
        if hps.mode=="train": self._add_emb_vis(embedding) # add to tensorboard
        emb_enc_inputs = tf.nn.embedding_lookup(embedding, self._enc_batch) # tensor with shape (batch_size, max_enc_steps, emb_size)
        emb_dec_inputs = [tf.nn.embedding_lookup(embedding, x) for x in tf.unstack(self._dec_batch, axis=1)] # list length max_dec_steps containing shape (batch_size, emb_size)

      # Add the encoder.
      enc_outputs, fw_st, bw_st = self._add_encoder(emb_enc_inputs, self._enc_lens)
      self._enc_states = enc_outputs

      # Our encoder is bidirectional and our decoder is unidirectional so we need to reduce the final encoder hidden state to the right size to be the initial decoder hidden state
      self._dec_in_state = self._reduce_states(fw_st, bw_st)

      # Add the decoder.
      with tf.variable_scope('decoder'):
        (self.decoder_outputs, self._dec_out_state, self.attn_dists, self.p_gens, self.coverage, self.vocab_scores,
         self.final_dists, self.samples, self.greedy_search_samples, self.temporal_es,
         self.sampling_rewards, self.greedy_rewards) = self._add_decoder(emb_dec_inputs, embedding)

      if FLAGS.use_discounted_rewards and hps.rl_training and hps.mode in ['train', 'eval']:
        # Get the sampled and greedy sentence from model output
        # self.samples: (max_dec_steps, batch_size, k)
        self.sampling_discounted_rewards = tf.stack(self.discount_rewards(tf.unstack(self.sampling_rewards))) # list of max_dec_steps * (batch_size, k)
        self.greedy_discounted_rewards = tf.stack(self.discount_rewards(tf.unstack(self.greedy_rewards))) # list of max_dec_steps * (batch_size, k)
      elif FLAGS.use_intermediate_rewards and hps.rl_training and hps.mode in ['train', 'eval']:
        # Get the sampled and greedy sentence from model output
        # self.samples: (max_dec_steps, batch_size, k)
        self.sampling_discounted_rewards = tf.stack(self.intermediate_rewards(tf.unstack(self.sampling_rewards))) # list of max_dec_steps * (batch_size, k)
        self.greedy_discounted_rewards = tf.stack(self.intermediate_rewards(tf.unstack(self.greedy_rewards))) # list of max_dec_steps * (batch_size, k)
      elif hps.ac_training and hps.mode in ['train', 'eval']:
        # Get the sampled and greedy sentence from model output
        self.sampled_sentences = tf.transpose(tf.stack(self.samples), perm=[1,2,0]) # (batch_size, k, <=max_dec_steps) word indices
        self.greedy_search_sentences = tf.transpose(tf.stack(self.greedy_search_samples), perm=[1,2,0]) # (batch_size, k, <=max_dec_steps) word indices

    if hps.mode == "decode":
      # We run decode beam search mode one decoder step at a time
      assert len(self.final_dists)==1 # final_dists is a singleton list containing shape (batch_size, extended_vsize)
      self.final_dists = self.final_dists[0]
      topk_probs, self._topk_ids = tf.nn.top_k(self.final_dists, hps.batch_size*2) # take the k largest probs. note batch_size=beam_size in decode mode
      self._topk_log_probs = tf.log(topk_probs)
Author: sra4077 | Project: RLSeq2Seq | Lines: 54 | Source: model.py



Note: The tensorflow.unstack examples on this page were compiled by 纯净天空 from source code and documentation hosted on GitHub, MSDocs, and similar platforms; the snippets were selected from open-source projects contributed by many developers. Copyright in the source code remains with the original authors; consult each project's License before redistributing or reusing it. Please do not reproduce this compilation without permission.

