Python tensorflow.range Function Code Examples


This article collects and organizes typical usage examples of the tensorflow.range function in Python. If you have been struggling with questions such as what exactly tf.range does, how to call it, or what real-world usage looks like, the curated code examples below may help.



The following presents 20 code examples of the range function, sorted by popularity by default.
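
Before the examples, here is a minimal sketch of tf.range itself (my own illustration, assuming TensorFlow 1.x, which all of the snippets below target): like Python's built-in range, it accepts a limit, a start/limit pair, or start/limit/delta, and returns a 1-D tensor.

    import tensorflow as tf  # the snippets in this article target TensorFlow 1.x

    # tf.range mirrors Python's range(): tf.range(limit), tf.range(start, limit),
    # or tf.range(start, limit, delta), each returning a 1-D tensor.
    a = tf.range(5)               # [0, 1, 2, 3, 4]
    b = tf.range(3, 18, 3)        # [3, 6, 9, 12, 15]
    c = tf.range(0.0, 1.0, 0.25)  # float steps work too: [0.0, 0.25, 0.5, 0.75]

    with tf.Session() as sess:
        print(sess.run([a, b, c]))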

Example 1: loop

        def loop(step_, beams_, beam_value_, golden_value_, golden_inside_, step_valid_, g_id_, golden_record, beam_record):
            cur_feat_x_ = tf.gather(x, step_)
            cur_golden_path_ = tf.gather(golden_path, tf.range(step_))
            cur_golden_feat_ = self._add_tag_dynamic(cur_feat_x_, cur_golden_path_)
            # cur_golden_output_ = self._build_cnn(cur_golden_feat_)
            cur_golden_output_ = build(cur_golden_feat_)
            cur_golden_node_ = tf.gather(golden_path, tf.reshape(step_, [1]))
            golden_value_ = tf.add(golden_value_,
                                  tf.slice(cur_golden_output_, tf.concat(0, [[0], cur_golden_node_]), [1, 1]))

            cur_beam_ = tf.unpack(beams_, num=self.beam_size)
            cur_beam_feat_ = tf.concat(0, [self._add_tag_dynamic(cur_feat_x_, tf.reshape(e, [-1])) for e in cur_beam_])
            # cur_beam_output_ = self._build_cnn(cur_beam_feat_)
            cur_beam_output_ = build(cur_beam_feat_)

            golden_record = golden_record.write(step_, cur_golden_output_)
            beam_record = beam_record.write(step_, cur_beam_output_)

            beam_value_, beams_ = self._top_beams_new(cur_beam_output_, beam_value_, beams_)
            new_golden_path_ = tf.gather(golden_path, tf.range(step_ + 1))
            # golden_beam_id_ = index_of_tensor(new_golden_path_, beams_)
            g_id_ = index_of_tensor(new_golden_path_, beams_)
            golden_inside_ = tf.select(tf.less(tf.shape(g_id_)[0], 1),
                                       tf.constant(False, tf.bool), tf.constant(True, tf.bool))

            step_valid_ = tf.logical_and(tf.less(step_+1, length), tf.less(step_+1, self.max_step_tracked))
            return [step_ + 1, beams_, beam_value_, golden_value_, golden_inside_, step_valid_, g_id_, golden_record, beam_record]
Author: staylonging | Project: tf | Lines: 27 | Source: cnnglobal.py


Example 2: default_exchange_proposed_fn_

  def default_exchange_proposed_fn_(num_replica, seed=None):
    """Default function for `exchange_proposed_fn` of `kernel`."""
    num_replica = tf.to_int32(num_replica)

    seed = distributions_util.gen_new_seed(seed, 'default_exchange_proposed_fn')
    random_uniform = tf.random_uniform([], seed=seed)
    accept_proposed_exchange = random_uniform < probs

    seed = distributions_util.gen_new_seed(seed, 'default_exchange_proposed_fn')
    zero_start = tf.random_uniform([], seed=seed) > 0.5
    if num_replica % 2 == 0:
      exchange_proposed = tf.where(
          zero_start, tf.range(num_replica),
          tf.sparse_to_dense(tf.range(num_replica - 2), (num_replica,),
                             tf.range(1, num_replica - 1)))
      exchange_proposed_n = tf.where(zero_start, num_replica // 2,
                                     num_replica // 2 - 1)
    else:
      exchange_proposed = tf.where(
          zero_start, tf.range(num_replica - 1), tf.range(1, num_replica))
      exchange_proposed_n = num_replica // 2

    exchange_proposed = tf.reshape(exchange_proposed, (num_replica // 2, 2))
    exchange_proposed = tf.where(accept_proposed_exchange, exchange_proposed,
                                 tf.zeros_like(exchange_proposed))
    exchange_proposed_n = tf.where(accept_proposed_exchange,
                                   exchange_proposed_n,
                                   tf.zeros_like(exchange_proposed_n))
    return exchange_proposed, exchange_proposed_n
Author: lewisKit | Project: probability | Lines: 29 | Source: replica_exchange_mc.py


Example 3: unpool_layer2x2_batch

    def unpool_layer2x2_batch(self, bottom, argmax):
        bottom_shape = tf.shape(bottom)
        top_shape = [bottom_shape[0], bottom_shape[1] * 2, bottom_shape[2] * 2, bottom_shape[3]]

        batch_size = top_shape[0]
        height = top_shape[1]
        width = top_shape[2]
        channels = top_shape[3]

        argmax_shape = tf.to_int64([batch_size, height, width, channels])
        argmax = self.unravel_argmax(argmax, argmax_shape)

        t1 = tf.to_int64(tf.range(channels))
        t1 = tf.tile(t1, [batch_size * (width // 2) * (height // 2)])
        t1 = tf.reshape(t1, [-1, channels])
        t1 = tf.transpose(t1, perm=[1, 0])
        t1 = tf.reshape(t1, [channels, batch_size, height // 2, width // 2, 1])
        t1 = tf.transpose(t1, perm=[1, 0, 2, 3, 4])

        t2 = tf.to_int64(tf.range(batch_size))
        t2 = tf.tile(t2, [channels * (width // 2) * (height // 2)])
        t2 = tf.reshape(t2, [-1, batch_size])
        t2 = tf.transpose(t2, perm=[1, 0])
        t2 = tf.reshape(t2, [batch_size, channels, height // 2, width // 2, 1])

        t3 = tf.transpose(argmax, perm=[1, 4, 2, 3, 0])

        t = tf.concat(4, [t2, t3, t1])
        indices = tf.reshape(t, [(height // 2) * (width // 2) * channels * batch_size, 4])

        x1 = tf.transpose(bottom, perm=[0, 3, 1, 2])
        values = tf.reshape(x1, [-1])
        return tf.scatter_nd(indices, values, tf.to_int64(top_shape))
Author: BenJamesbabala | Project: Tensorflow-DeconvNet-Segmentation | Lines: 33 | Source: DeconvNetPipeline.py


Example 4: translate

def translate(U, theta, out_height, out_width):
    num_batch = tf.shape(U)[0]
    height, width, num_ch = U.get_shape()[1:]
    height = height.value
    width = width.value
    num_ch = num_ch.value
    hwc = height*width*num_ch

    nind = tf.range(num_batch)
    x = repeat(tf.range(height), width)
    y = tf.tile(tf.range(width), tf.pack([height]))
    cind = tf.range(num_ch)

    nind = tf.expand_dims(repeat(nind, hwc), 1)
    x = tf.tile(tf.expand_dims(repeat(x, num_ch), 1), tf.pack([num_batch,1]))
    y = tf.tile(tf.expand_dims(repeat(y, num_ch), 1), tf.pack([num_batch,1]))
    cind = tf.tile(tf.expand_dims(cind, 1), tf.pack([num_batch*height*width,1]))

    dx, dy = tf.split(1, 2, theta)
    dx = tf.cast(tf.clip_by_value(dx, 0, out_height-height), 'int32')
    dx = tf.reshape(tf.tile(dx, tf.pack([1,hwc])), [-1,1])
    dy = tf.cast(tf.clip_by_value(dy, 0, out_width-width), 'int32')
    dy = tf.reshape(tf.tile(dy, tf.pack([1,hwc])), [-1,1])
    x = x + dx
    y = y + dy

    tind = tf.concat(1, [nind, x, y, cind])
    val = tf.reshape(U, [-1])
    T = tf.sparse_to_dense(tind,
            tf.pack([num_batch, out_height, out_width, num_ch]),
            val)
    T.set_shape([None, out_height, out_width, num_ch])
    return T
Author: juho-lee | Project: tf_practice | Lines: 33 | Source: translate.py


Example 5: inference_pooling_L2norm_choose_filter

def inference_pooling_L2norm_choose_filter(images, kheight=2, kwidth=5):
    # channel domain pooling mapper
    split_dim = 1   # 1 represents split on spatial domain
    input_image_list = split_eeg.split_eeg_signal_axes(images,
                                                       split_dim=split_dim)
    input_image_length = len(input_image_list)

    # the pooling mapper should choose half size of the image size
    pool_s, _ = concat_eeg.pool_eeg_signal_channel(input_image_list, input_image_length/2, 1)
    _print_tensor_size(pool_s)

    input_shape = pool_s.get_shape()

    range_even = tf.range(0, input_shape[0], 2)
    range_odd  = tf.range(1, input_shape[0], 2)

    even_rows = tf.nn.embedding_lookup(images, range_even)
    odd_rows = tf.nn.embedding_lookup(images, range_odd)

    even_rows = tf.mul(pool_s,pool_s)
    even_rows = tf.mul(3.0, pool_s)

    even_rows = tf.nn.avg_pool(even_rows, ksize=[1, 1, 3, 1],
                            strides=[1, 1, 3, 1], padding='VALID')

    pool_s = tf.sqrt(pool_s)

    pool_s = tf.nn.max_pool(pool_s, ksize=[1, 2, 1, 1],
                             strides=[1, 2, 1, 1], padding='VALID')

    _print_tensor_size(pool_s)

    return pool_s
Author: ZijingMao | Project: ROICNN | Lines: 33 | Source: rsvp_quick_inference.py


Example 6: accuracy_instance

def accuracy_instance(predictions, targets, n=[1, 2, 3, 4, 5, 10], nb_classes=5, nb_samples_per_class=10, batch_size=1):
    targets = tf.cast(targets, predictions.dtype)

    accuracy = tf.constant(value=0, shape=(batch_size, nb_samples_per_class), dtype=tf.float32)
    indices = tf.constant(value=0, shape=(batch_size, nb_classes+1), dtype=tf.float32)

    def step_((accuracy, indices), (p, t)):
        """with tf.variable_scope("Metric_step_var", reuse=True):
            accuracy = tf.get_variable(name="accuracy", shape=(batch_size, nb_samples_per_class),
                                       initializer=tf.constant_initializer(0), dtype=tf.float32)
            indices = tf.get_variable(name="indices", shape=(batch_size, nb_classes + 1),
                                      initializer=tf.constant_initializer(0), dtype=tf.float32)"""

        p = tf.cast(p, tf.int32)
        t = tf.cast(t, tf.int32)
        ##Accuracy Update
        batch_range = tf.cast(tf.range(0, batch_size), dtype=tf.int32)
        gather = tf.cast(tf.gather_nd(indices,tf.stack([tf.range(0,p.get_shape().as_list()[0]), t], axis=1)), tf.int32)
        index = tf.cast(tf.stack([batch_range, gather], axis=1), dtype=tf.int64)
        val = tf.cast(tf.equal(p, t), tf.float32)
        delta = tf.SparseTensor(indices=index, values=val, dense_shape=tf.cast(accuracy.get_shape().as_list(), tf.int64))
        accuracy = accuracy + tf.sparse_tensor_to_dense(delta)
        ##Index Update
        index = tf.cast(tf.stack([batch_range, t], axis=1), dtype=tf.int64)
        val = tf.constant(1.0, shape=[batch_size])
        delta = tf.SparseTensor(indices=index, values=val, dense_shape=tf.cast(indices.get_shape().as_list(), dtype=tf.int64))
        indices = indices + tf.sparse_tensor_to_dense(delta)
        return [accuracy, indices]
Author: jayvischeng | Project: NTM-One-Shot-TF | Lines: 28 | Source: Metrics.py


Example 7: _do_maximum_mean

def _do_maximum_mean(samples, envelope, high, name=None):
  """Common code between maximum_mean and minimum_mean."""
  with tf.name_scope(name, "do_maximum_mean", [samples, envelope, high]):
    dtype = dtype_util.common_dtype([samples, envelope, high], tf.float32)
    samples = tf.convert_to_tensor(samples, name="samples", dtype=dtype)
    envelope = tf.convert_to_tensor(envelope, name="envelope", dtype=dtype)
    high = tf.convert_to_tensor(high, name="high", dtype=dtype)
    n = tf.rank(samples)
    # Move the batch dimension of `samples` to the rightmost position,
    # where the _batch_sort_vector function wants it.
    perm = tf.concat([tf.range(1, n), [0]], axis=0)
    samples = tf.transpose(samples, perm)

    samples = _batch_sort_vector(samples)

    # The maximum mean is given by taking `envelope`-worth of
    # probability from the smallest samples and moving it to the
    # maximum value.  This amounts to:
    # - ignoring the smallest k samples, where `k/n < envelope`
    # - taking a `1/n - (envelope - k/n)` part of the index k sample
    # - taking all the other samples
    # - and adding `envelope * high` at the end.
    # The following is a vectorized and batched way of computing this.
    # `max_mean_contrib` is a mask implementing the previous.
    batch_size = tf.shape(samples)[-1]
    batch_size = tf.cast(batch_size, dtype=dtype)
    step = 1. / batch_size
    cum_steps = step * tf.range(1, batch_size + 1, dtype=dtype)
    max_mean_contrib = tf.clip_by_value(
        cum_steps - envelope[..., tf.newaxis],
        clip_value_min=0.,
        clip_value_max=step)
    return tf.reduce_sum(samples * max_mean_contrib, axis=-1) + envelope * high
Author: asudomoeva | Project: probability | Lines: 33 | Source: statistical_testing.py


Example 8: coord_addition

def coord_addition(votes, H, W):
    """Coordinate addition.

    :param votes: (24, 4, 4, 32, 10, 16)
    :param H, W: spatial height and width, 4

    :return votes: (24, 4, 4, 32, 10, 16)
    """
    coordinate_offset_hh = tf.reshape(
      (tf.range(H, dtype=tf.float32) + 0.50) / H, [1, H, 1, 1, 1]
    )
    coordinate_offset_h0 = tf.constant(
      0.0, shape=[1, H, 1, 1, 1], dtype=tf.float32
    )
    coordinate_offset_h = tf.stack(
      [coordinate_offset_hh, coordinate_offset_h0] + [coordinate_offset_h0 for _ in range(14)], axis=-1
    )  # (1, 4, 1, 1, 1, 16)

    coordinate_offset_ww = tf.reshape(
      (tf.range(W, dtype=tf.float32) + 0.50) / W, [1, 1, W, 1, 1]
    )
    coordinate_offset_w0 = tf.constant(
      0.0, shape=[1, 1, W, 1, 1], dtype=tf.float32
    )
    coordinate_offset_w = tf.stack(
      [coordinate_offset_w0, coordinate_offset_ww] + [coordinate_offset_w0 for _ in range(14)], axis=-1
    ) # (1, 1, 4, 1, 1, 16)

    # (24, 4, 4, 32, 10, 16)
    votes = votes + coordinate_offset_h + coordinate_offset_w

    return votes
Author: lzqkean | Project: deep_learning | Lines: 32 | Source: core.py
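
To see the broadcasting at work, here is a tiny sketch (my own addition, assuming TensorFlow 1.x) that feeds a dummy votes tensor with the shape documented above through coord_addition; the coordinate offsets broadcast against the batch, capsule, and class axes.

    # Minimal sketch (assumes TF 1.x): dummy votes tensor with the documented shape.
    votes = tf.zeros([24, 4, 4, 32, 10, 16])
    votes_with_coords = coord_addition(votes, H=4, W=4)  # still (24, 4, 4, 32, 10, 16)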


Example 9: test_docstring_example

  def test_docstring_example(self):
    # Produce the first 1000 members of the Halton sequence in 3 dimensions.
    num_results = 1000
    dim = 3
    with self.test_session():
      sample = tfp.mcmc.sample_halton_sequence(
          dim, num_results=num_results, randomized=False)

      # Evaluate the integral of x_1 * x_2^2 * x_3^3  over the three dimensional
      # hypercube.
      powers = tf.range(1., limit=dim + 1)
      integral = tf.reduce_mean(
          tf.reduce_prod(sample ** powers, axis=-1))
      true_value = 1. / tf.reduce_prod(powers + 1.)

      # Produces a relative absolute error of 1.7%.
      self.assertAllClose(integral.eval(), true_value.eval(), rtol=0.02)

      # Now skip the first 1000 samples and recompute the integral with the next
      # thousand samples. The sequence_indices argument can be used to do this.

      sequence_indices = tf.range(start=1000, limit=1000 + num_results,
                                  dtype=tf.int32)
      sample_leaped = tfp.mcmc.sample_halton_sequence(
          dim, sequence_indices=sequence_indices, randomized=False)

      integral_leaped = tf.reduce_mean(
          tf.reduce_prod(sample_leaped ** powers, axis=-1))
      self.assertAllClose(integral_leaped.eval(), true_value.eval(), rtol=0.05)
Author: lewisKit | Project: probability | Lines: 29 | Source: sample_halton_sequence_test.py


Example 10: setup

    def setup(self, batch_size, num_concurrent):
        # Validate the batch size
        num_images = len(self.image_paths)
        batch_size = min(num_images, batch_size or self.data_spec.batch_size)
        if num_images % batch_size != 0:
            raise ValueError(
                'The total number of images ({}) must be divisible by the batch size ({}).'.format(
                    num_images, batch_size))
        self.num_batches = num_images / batch_size

        # Create a queue that will contain image paths (and their indices and extension indicator)
        if self.face_bboxes is None:
            self.path_bbox_queue = tf.FIFOQueue(capacity=num_images,
                                            dtypes=[tf.int32, tf.bool, tf.string],
                                            name='path_queue')
            indices = tf.range(num_images)
            self.enqueue_paths_op = self.path_bbox_queue.enqueue_many([indices, self.extension_mask,
                                                                   self.image_paths])
        else:
            self.path_bbox_queue = tf.FIFOQueue(capacity=num_images,
                                                dtypes=[tf.int32, tf.bool, tf.string, tf.int32],
                                                name='path_queue')
            indices = tf.range(num_images)
            self.enqueue_paths_op = self.path_bbox_queue.enqueue_many([indices, self.extension_mask,
                                                                                                  self.image_paths,self.face_bboxes])
        # Close the path queue (no more additions)
        self.close_path_queue_op = self.path_bbox_queue.close()

        # Create an operation that dequeues a single path and returns a processed image
        crop_flip = [[0,False]]
        if cfg.CROP:
            for i in range(1,5):
                crop_flip.append([i,False])

        if cfg.FLIP:
            for i in range(len(crop_flip)):
                crop_flip.append((crop_flip[i][0],True))

        (processed_idx_list,processed_img_list) = self.process(crop_flip)
        # Create a queue that will contain the processed images (and their indices)
        image_shape = (self.data_spec.crop_size, self.data_spec.crop_size, self.data_spec.channels)
        processed_queue = tf.FIFOQueue(capacity=int(np.ceil(len(crop_flip)*num_images / float(num_concurrent))),
                                       dtypes=[tf.int32, tf.float32],
                                       shapes=[(), image_shape],
                                       name='processed_queue')

        # Enqueue the processed image and path
        enqueue_processed_op = processed_queue.enqueue_many([processed_idx_list,processed_img_list])

        # Create a dequeue op that fetches a batch of processed images off the queue
        [self.ind_deq,self.img_deq] = processed_queue.dequeue_many(batch_size)
        self.dequeue_op = [self.ind_deq,self.img_deq]

        # Create a queue runner to perform the processing operations in parallel
        num_concurrent = min(num_concurrent, num_images)
        self.queue_runner = tf.train.QueueRunner(processed_queue,
                                                 [enqueue_processed_op] * num_concurrent)

        self.num_imgs = len(crop_flip)*num_images
        self.num_feats_per_image = len(crop_flip)
Author: charlesLucky | Project: tensorflow_vgg_face | Lines: 60 | Source: dataset.py


Example 11: get_position_encoding

def get_position_encoding(
    length, hidden_size, min_timescale=1.0, max_timescale=1.0e4):
  """Return positional encoding.

  Calculates the position encoding as a mix of sine and cosine functions with
  geometrically increasing wavelengths.
  Defined and formulized in Attention is All You Need, section 3.5.

  Args:
    length: Sequence length.
    hidden_size: Size of the
    min_timescale: Minimum scale that will be applied at each position
    max_timescale: Maximum scale that will be applied at each position

  Returns:
    Tensor with shape [length, hidden_size]
  """
  position = tf.to_float(tf.range(length))
  num_timescales = hidden_size // 2
  log_timescale_increment = (
      math.log(float(max_timescale) / float(min_timescale)) /
      (tf.to_float(num_timescales) - 1))
  inv_timescales = min_timescale * tf.exp(
      tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)
  scaled_time = tf.expand_dims(position, 1) * tf.expand_dims(inv_timescales, 0)
  signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)
  return signal
Author: 812864539 | Project: models | Lines: 27 | Source: model_utils.py
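
As a quick check of the helper above, the following usage sketch (my own addition; it assumes TensorFlow 1.x and that math is imported alongside the function) evaluates the encoding for a short sequence. The result concatenates the sine and cosine channels, giving shape [length, hidden_size].

    # Minimal usage sketch for get_position_encoding (assumes TF 1.x and `import math`).
    signal = get_position_encoding(length=50, hidden_size=512)
    with tf.Session() as sess:
        enc = sess.run(signal)
        print(enc.shape)  # (50, 512): sines in the first 256 columns, cosines in the rest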


Example 12: get_idx_map

def get_idx_map(shape):
    """Get index map for a image.
    Args:
        shape: [B, T, H, W] or [B, H, W]
    Returns:
        idx: [B, T, H, W, 2], or [B, H, W, 2]
    """
    s = shape
    ndims = tf.shape(s)
    wdim = ndims - 1
    hdim = ndims - 2
    idx_shape = tf.concat(0, [s, tf.constant([1])])
    ones_h = tf.ones(hdim - 1, dtype='int32')
    ones_w = tf.ones(wdim - 1, dtype='int32')
    h_shape = tf.concat(0, [ones_h, tf.constant([-1]), tf.constant([1, 1])])
    w_shape = tf.concat(0, [ones_w, tf.constant([-1]), tf.constant([1])])

    idx_y = tf.zeros(idx_shape, dtype='float')
    idx_x = tf.zeros(idx_shape, dtype='float')

    h = tf.slice(s, ndims - 2, [1])
    w = tf.slice(s, ndims - 1, [1])
    idx_y += tf.reshape(tf.to_float(tf.range(h[0])), h_shape)
    idx_x += tf.reshape(tf.to_float(tf.range(w[0])), w_shape)
    idx = tf.concat(ndims[0], [idx_y, idx_x])

    return idx
Author: renmengye | Project: deep-tracker | Lines: 27 | Source: build_deep_tracker.py


Example 13: fold_batches

 def fold_batches(acc, x):
   b = x[0]
   l = x[1]
   batch = tf.tile([b], [l])
   start = tf.range(l)
   end   = tf.minimum(tf.range(window, l + window), l)
   return tf.concat([acc, tf.transpose(tf.stack([batch, start, end]))], axis=0)
Author: rwth-i6 | Project: returnn | Lines: 7 | Source: TFNetworkSegModLayer.py


Example 14: loop

    def loop(q_, mask, mass_, found_):
        q_list = tf.dynamic_partition(q_, mask, 2)
        condition_indices = tf.dynamic_partition(tf.range(tf.shape(q_)[0]), mask, 2)  # 0 element it False,
        #  1 element if true

        p = q_list[1] * (1.0 - mass_) / tf.reduce_sum(q_list[1])
        p_new = tf.dynamic_stitch(condition_indices, [q_list[0], p])

        # condition verification and mask modification
        less_mask = tf.cast(tf.less(u, p_new), tf.int32)  # 0 when u is bigger than p, 1 when u is less than p
        condition_indices = tf.dynamic_partition(tf.range(tf.shape(p_new)[0]), less_mask,
                                                 2)  # 0 when u is bigger than p, 1 when u is less than p

        split_p_new = tf.dynamic_partition(p_new, less_mask, 2)
        split_u = tf.dynamic_partition(u, less_mask, 2)

        alpha = tf.dynamic_stitch(condition_indices, [split_p_new[0], split_u[1]])
        mass_ += tf.reduce_sum(split_u[1])

        mask = mask * (tf.ones_like(less_mask) - less_mask)

        found_ = tf.cond(tf.equal(tf.reduce_sum(less_mask), 0),
                         lambda: False,
                         lambda: True)

        alpha = tf.reshape(alpha, q_.shape)

        return alpha, mask, mass_, found_
Author: RileyShe | Project: DeepPavlov | Lines: 28 | Source: tf_csoftmax_attention.py


Example 15: _potential_scale_reduction_single_state

def _potential_scale_reduction_single_state(state, independent_chain_ndims):
  """potential_scale_reduction for one single state `Tensor`."""
  with tf.name_scope(
      'potential_scale_reduction_single_state',
      values=[state, independent_chain_ndims]):
    # We assume exactly one leading dimension indexes e.g. correlated samples
    # from each Markov chain.
    state = tf.convert_to_tensor(state, name='state')
    sample_ndims = 1

    sample_axis = tf.range(0, sample_ndims)
    chain_axis = tf.range(sample_ndims,
                          sample_ndims + independent_chain_ndims)
    sample_and_chain_axis = tf.range(
        0, sample_ndims + independent_chain_ndims)

    n = _axis_size(state, sample_axis)
    m = _axis_size(state, chain_axis)

    # In the language of Brooks and Gelman (1998),
    # B / n is the between chain variance, the variance of the chain means.
    # W is the within sequence variance, the mean of the chain variances.
    b_div_n = _reduce_variance(
        tf.reduce_mean(state, sample_axis, keepdims=True),
        sample_and_chain_axis,
        biased=False)
    w = tf.reduce_mean(
        _reduce_variance(state, sample_axis, keepdims=True, biased=True),
        sample_and_chain_axis)

    # sigma^2_+ is an estimate of the true variance, which would be unbiased if
    # each chain was drawn from the target.  c.f. "law of total variance."
    sigma_2_plus = w + b_div_n

    return ((m + 1.) / m) * sigma_2_plus / w - (n - 1.) / (m * n)
Author: asudomoeva | Project: probability | Lines: 35 | Source: diagnostic.py


Example 16: scheduled_sample_count

def scheduled_sample_count(ground_truth_x,
                           generated_x,
                           batch_size,
                           scheduled_sample_var):
  """Sample batch with specified mix of groundtruth and generated data points.

  Args:
    ground_truth_x: tensor of ground-truth data points.
    generated_x: tensor of generated data points.
    batch_size: batch size
    scheduled_sample_var: number of ground-truth examples to include in batch.
  Returns:
    New batch with num_ground_truth sampled from ground_truth_x and the rest
    from generated_x.
  """
  num_ground_truth = scheduled_sample_var
  idx = tf.random_shuffle(tf.range(batch_size))
  ground_truth_idx = tf.gather(idx, tf.range(num_ground_truth))
  generated_idx = tf.gather(idx, tf.range(num_ground_truth, batch_size))

  ground_truth_examps = tf.gather(ground_truth_x, ground_truth_idx)
  generated_examps = tf.gather(generated_x, generated_idx)

  output = tf.dynamic_stitch([ground_truth_idx, generated_idx],
                             [ground_truth_examps, generated_examps])
  # if batch size is known set it.
  if isinstance(batch_size, int):
    output.set_shape([batch_size] + common_layers.shape_list(output)[1:])
  return output
Author: qixiuai | Project: tensor2tensor | Lines: 29 | Source: common_video.py
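
A small usage sketch for the sampler above (my own addition; it assumes TensorFlow 1.x and the surrounding module's imports, e.g. common_layers from tensor2tensor): three ground-truth rows are kept and one generated row fills the remainder of a batch of four.

    # Minimal sketch (assumes TF 1.x): keep 3 ground-truth rows, 1 generated row.
    ground_truth = tf.ones([4, 8])
    generated = tf.zeros([4, 8])
    mixed = scheduled_sample_count(ground_truth, generated,
                                   batch_size=4, scheduled_sample_var=3)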


Example 17: while_step

 def while_step(t, rnn_state, tas, accs):
   """Implements one timestep of FIVO computation."""
   log_weights_acc, log_p_hat_acc, kl_acc = accs
   cur_inputs, cur_mask = nested.read_tas([inputs_ta, mask_ta], t)
   # Run the cell for one step.
   log_q_z, log_p_z, log_p_x_given_z, kl, new_state = cell(
       cur_inputs,
       rnn_state,
       cur_mask,
   )
   # Compute the incremental weight and use it to update the current
   # accumulated weight.
   kl_acc += kl * cur_mask
   log_alpha = (log_p_x_given_z + log_p_z - log_q_z) * cur_mask
   log_alpha = tf.reshape(log_alpha, [num_samples, batch_size])
   log_weights_acc += log_alpha
   # Calculate the effective sample size.
   ess_num = 2 * tf.reduce_logsumexp(log_weights_acc, axis=0)
   ess_denom = tf.reduce_logsumexp(2 * log_weights_acc, axis=0)
   log_ess = ess_num - ess_denom
   # Calculate the ancestor indices via resampling. Because we maintain the
   # log unnormalized weights, we pass the weights in as logits, allowing
   # the distribution object to apply a softmax and normalize them.
   resampling_dist = tf.contrib.distributions.Categorical(
       logits=tf.transpose(log_weights_acc, perm=[1, 0]))
   ancestor_inds = tf.stop_gradient(
       resampling_dist.sample(sample_shape=num_samples, seed=random_seed))
   # Because the batch is flattened and laid out as discussed
   # above, we must modify ancestor_inds to index the proper samples.
   # The particles in the ith filter are distributed every batch_size rows
   # in the batch, and offset i rows from the top. So, to correct the indices
   # we multiply by the batch_size and add the proper offset. Crucially,
   # when ancestor_inds is flattened the layout of the batch is maintained.
   offset = tf.expand_dims(tf.range(batch_size), 0)
   ancestor_inds = tf.reshape(ancestor_inds * batch_size + offset, [-1])
   noresample_inds = tf.range(num_samples * batch_size)
   # Decide whether or not we should resample; don't resample if we are past
   # the end of a sequence.
   should_resample = resampling_criterion(num_samples, log_ess, t)
   should_resample = tf.logical_and(should_resample,
                                    cur_mask[:batch_size] > 0.)
   float_should_resample = tf.to_float(should_resample)
   ancestor_inds = tf.where(
       tf.tile(should_resample, [num_samples]),
       ancestor_inds,
       noresample_inds)
   new_state = nested.gather_tensors(new_state, ancestor_inds)
   # Update the TensorArrays before we reset the weights so that we capture
   # the incremental weights and not zeros.
   ta_updates = [log_weights_acc, log_ess, float_should_resample]
   new_tas = [ta.write(t, x) for ta, x in zip(tas, ta_updates)]
   # For the particle filters that resampled, update log_p_hat and
   # reset weights to zero.
   log_p_hat_update = tf.reduce_logsumexp(
       log_weights_acc, axis=0) - tf.log(tf.to_float(num_samples))
   log_p_hat_acc += log_p_hat_update * float_should_resample
   log_weights_acc *= (1. - tf.tile(float_should_resample[tf.newaxis, :],
                                    [num_samples, 1]))
   new_accs = (log_weights_acc, log_p_hat_acc, kl_acc)
   return t + 1, new_state, new_tas, new_accs
Author: ALISCIFP | Project: models | Lines: 60 | Source: bounds.py


Example 18: _get_values_from_start_and_end

  def _get_values_from_start_and_end(self, input_tensor, num_start_samples,
                                     num_end_samples, total_num_samples):
    """slices num_start_samples and last num_end_samples from input_tensor.

    Args:
      input_tensor: An int32 tensor of shape [N] to be sliced.
      num_start_samples: Number of examples to be sliced from the beginning
        of the input tensor.
      num_end_samples: Number of examples to be sliced from the end of the
        input tensor.
      total_num_samples: Sum of is num_start_samples and num_end_samples. This
        should be a scalar.

    Returns:
      A tensor containing the first num_start_samples and last num_end_samples
      from input_tensor.

    """
    input_length = tf.shape(input_tensor)[0]
    start_positions = tf.less(tf.range(input_length), num_start_samples)
    end_positions = tf.greater_equal(
        tf.range(input_length), input_length - num_end_samples)
    selected_positions = tf.logical_or(start_positions, end_positions)
    selected_positions = tf.cast(selected_positions, tf.int32)
    indexed_positions = tf.multiply(tf.cumsum(selected_positions),
                                    selected_positions)
    one_hot_selector = tf.one_hot(indexed_positions - 1,
                                  total_num_samples,
                                  dtype=tf.int32)
    return tf.tensordot(input_tensor, one_hot_selector, axes=[0, 0])
Author: ALISCIFP | Project: models | Lines: 30 | Source: balanced_positive_negative_sampler.py


Example 19: max_unpool

def max_unpool(inputs, pooling_indices, output_shape=None, k_size=[1, 2, 2, 1]):
    # NOTE! this function is based on the implementation by kwotsin in
    # https://github.com/kwotsin/TensorFlow-ENet

    # inputs has shape [batch_size, height, width, channels]

    # pooling_indices: pooling indices of the previously max_pooled layer

    # output_shape: what shape the returned tensor should have

    pooling_indices = tf.cast(pooling_indices, tf.int32)
    input_shape = tf.shape(inputs, out_type=tf.int32)

    one_like_pooling_indices = tf.ones_like(pooling_indices, dtype=tf.int32)
    batch_shape = tf.concat([[input_shape[0]], [1], [1], [1]], 0)
    batch_range = tf.reshape(tf.range(input_shape[0], dtype=tf.int32), shape=batch_shape)
    b = one_like_pooling_indices*batch_range
    y = pooling_indices//(output_shape[2]*output_shape[3])
    x = (pooling_indices//output_shape[3]) % output_shape[2]
    feature_range = tf.range(output_shape[3], dtype=tf.int32)
    f = one_like_pooling_indices*feature_range

    inputs_size = tf.size(inputs)
    indices = tf.transpose(tf.reshape(tf.stack([b, y, x, f]), [4, inputs_size]))
    values = tf.reshape(inputs, [inputs_size])

    ret = tf.scatter_nd(indices, values, output_shape)

    return ret
Author: ascenoputing | Project: SemanticSegmentation_DL | Lines: 29 | Source: network.py
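
A usage sketch for the unpooling helper above (my own addition, not part of the original repository): it is typically paired with tf.nn.max_pool_with_argmax, whose argmax output supplies pooling_indices, while output_shape is the shape of the tensor before pooling. The shapes below are illustrative assumptions.

    # Minimal sketch (assumes TF 1.x): max-pool an input, then scatter it back.
    inputs = tf.random_normal([2, 8, 8, 16])
    pooled, argmax = tf.nn.max_pool_with_argmax(inputs, ksize=[1, 2, 2, 1],
                                                strides=[1, 2, 2, 1], padding='SAME')
    unpooled = max_unpool(pooled, argmax, output_shape=[2, 8, 8, 16])

Note that the index arithmetic in max_unpool assumes the per-image flattening convention that older TensorFlow versions use for max_pool_with_argmax (batch dimension not included in the index).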


Example 20: reorder_beam

def reorder_beam(beam_size, batch_size, beam_val, output, is_first,
                 tensors_to_reorder):
  """Reorder to minimize beam costs."""
  # beam_val is [batch_size x beam_size]; let b = batch_size * beam_size
  # decided is len x b x a x b
  # output is b x out_size; step is b x len x a x b;
  outputs = tf.split(axis=0, num_or_size_splits=beam_size, value=tf.nn.log_softmax(output))
  all_beam_vals, all_beam_idx = [], []
  beam_range = 1 if is_first else beam_size
  for i in xrange(beam_range):
    top_out, top_out_idx = tf.nn.top_k(outputs[i], k=beam_size)
    cur_beam_val = beam_val[:, i]
    top_out = tf.Print(top_out, [top_out, top_out_idx, beam_val, i,
                                 cur_beam_val], "GREPO", summarize=8)
    all_beam_vals.append(top_out + tf.expand_dims(cur_beam_val, 1))
    all_beam_idx.append(top_out_idx)
  all_beam_idx = tf.reshape(tf.transpose(tf.concat(axis=1, values=all_beam_idx), [1, 0]),
                            [-1])
  top_beam, top_beam_idx = tf.nn.top_k(tf.concat(axis=1, values=all_beam_vals), k=beam_size)
  top_beam_idx = tf.Print(top_beam_idx, [top_beam, top_beam_idx],
                          "GREP", summarize=8)
  reordered = [[] for _ in xrange(len(tensors_to_reorder) + 1)]
  top_out_idx = []
  for i in xrange(beam_size):
    which_idx = top_beam_idx[:, i] * batch_size + tf.range(batch_size)
    top_out_idx.append(tf.gather(all_beam_idx, which_idx))
    which_beam = top_beam_idx[:, i] / beam_size  # [batch]
    which_beam = which_beam * batch_size + tf.range(batch_size)
    reordered[0].append(tf.gather(output, which_beam))
    for i, t in enumerate(tensors_to_reorder):
      reordered[i + 1].append(tf.gather(t, which_beam))
  new_tensors = [tf.concat(axis=0, values=t) for t in reordered]
  top_out_idx = tf.concat(axis=0, values=top_out_idx)
  return (top_beam, new_tensors[0], top_out_idx, new_tensors[1:])
Author: Jmq14 | Project: models | Lines: 34 | Source: neural_gpu.py



Note: The tensorflow.range examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers, and copyright remains with their original authors; please consult each project's License before redistributing or reusing the code. Do not republish this article without permission.

