
Python tensorflow.floor Function Code Examples


This article collects typical code examples of the tensorflow.floor function in Python. If you are wondering how exactly to use floor, what it does, or what real-world floor usage looks like, the hand-picked examples below may help.



A total of 20 code examples of the floor function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
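
Before the collected examples, here is a minimal sketch of what tf.floor itself does: it rounds every element of a float tensor down toward negative infinity. The snippet is not one of the 20 collected examples; it assumes TensorFlow 1.x, matching the graph-style code used throughout the examples below.

import tensorflow as tf  # assumes TensorFlow 1.x, like the collected examples

x = tf.constant([1.7, -0.3, 2.0, -2.5])
y = tf.floor(x)  # element-wise round toward negative infinity

with tf.Session() as sess:
    print(sess.run(y))  # [ 1. -1.  2. -3.]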

Example 1: test_forward_floor

def test_forward_floor():
    ishape = (1, 3, 10, 10)
    inp_array = np.random.uniform(size=ishape).astype(np.float32)
    with tf.Graph().as_default():
        in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
        tf.floor(in1)
        compare_tf_with_tvm(inp_array, 'Placeholder:0', 'Floor:0')
Developer: LANHUIYING, Project: tvm, Lines: 7, Source: test_forward.py


Example 2: process_reals

def process_reals(x, lod, mirror_augment, drange_data, drange_net):
    with tf.name_scope('ProcessReals'):
        with tf.name_scope('DynamicRange'):
            x = tf.cast(x, tf.float32)
            x = misc.adjust_dynamic_range(x, drange_data, drange_net)
        if mirror_augment:
            with tf.name_scope('MirrorAugment'):
                s = tf.shape(x)
                mask = tf.random_uniform([s[0], 1, 1, 1], 0.0, 1.0)
                mask = tf.tile(mask, [1, s[1], s[2], s[3]])
                x = tf.where(mask < 0.5, x, tf.reverse(x, axis=[3]))
        with tf.name_scope('FadeLOD'): # Smooth crossfade between consecutive levels-of-detail.
            s = tf.shape(x)
            y = tf.reshape(x, [-1, s[1], s[2]//2, 2, s[3]//2, 2])
            y = tf.reduce_mean(y, axis=[3, 5], keepdims=True)
            y = tf.tile(y, [1, 1, 1, 2, 1, 2])
            y = tf.reshape(y, [-1, s[1], s[2], s[3]])
            x = tfutil.lerp(x, y, lod - tf.floor(lod))
        with tf.name_scope('UpscaleLOD'): # Upscale to match the expected input/output size of the networks.
            s = tf.shape(x)
            factor = tf.cast(2 ** tf.floor(lod), tf.int32)
            x = tf.reshape(x, [-1, s[1], s[2], 1, s[3], 1])
            x = tf.tile(x, [1, 1, 1, factor, 1, factor])
            x = tf.reshape(x, [-1, s[1], s[2] * factor, s[3] * factor])
        return x
Developer: Gavin666Github, Project: progressive_growing_of_gans, Lines: 25, Source: train.py


Example 3: _interpolate2d

def _interpolate2d(imgs, x, y):
    n_batch = tf.shape(imgs)[0]
    xlen = tf.shape(imgs)[1]
    ylen = tf.shape(imgs)[2]
    n_channel = tf.shape(imgs)[3]

    x = tf.to_float(x)
    y = tf.to_float(y)
    xlen_f = tf.to_float(xlen)
    ylen_f = tf.to_float(ylen)
    zero = tf.zeros([], dtype='int32')
    max_x = tf.cast(xlen - 1, 'int32')
    max_y = tf.cast(ylen - 1, 'int32')

    # scale indices from [-1, 1] to [0, xlen/ylen]
    x = (x + 1.) * (xlen_f - 1.) * 0.5
    y = (y + 1.) * (ylen_f - 1.) * 0.5

    # do sampling
    x0 = tf.cast(tf.floor(x), 'int32')
    x1 = x0 + 1
    y0 = tf.cast(tf.floor(y), 'int32')
    y1 = y0 + 1

    x0 = tf.clip_by_value(x0, zero, max_x)
    x1 = tf.clip_by_value(x1, zero, max_x)
    y0 = tf.clip_by_value(y0, zero, max_y)
    y1 = tf.clip_by_value(y1, zero, max_y)
    base = _repeat(tf.range(n_batch) * xlen * ylen, ylen * xlen)
    base_x0 = base + x0 * ylen
    base_x1 = base + x1 * ylen
    index00 = base_x0 + y0
    index01 = base_x0 + y1
    index10 = base_x1 + y0
    index11 = base_x1 + y1

    # use indices to lookup pixels in the flat image and restore
    # n_channel dim
    imgs_flat = tf.reshape(imgs, [-1, n_channel])
    imgs_flat = tf.to_float(imgs_flat)
    I00 = tf.gather(imgs_flat, index00)
    I01 = tf.gather(imgs_flat, index01)
    I10 = tf.gather(imgs_flat, index10)
    I11 = tf.gather(imgs_flat, index11)

    # and finally calculate interpolated values
    dx = x - tf.to_float(x0)
    dy = y - tf.to_float(y0)
    w00 = tf.expand_dims((1. - dx) * (1. - dy), 1)
    w01 = tf.expand_dims((1. - dx) * dy, 1)
    w10 = tf.expand_dims(dx * (1. - dy), 1)
    w11 = tf.expand_dims(dx * dy, 1)
    output = tf.add_n([w00*I00, w01*I01, w10*I10, w11*I11])

    # reshape
    output = tf.reshape(output, [n_batch, xlen, ylen, n_channel])

    return output
Developer: Ryo-Ito, Project: spatial_transformer_network, Lines: 58, Source: warp.py


Example 4: staircase_loss

def staircase_loss(y_true, y_pred, var_a=16.0, cnst=1.0/255.0):
    """ Keras Staircase Loss """
    height = cnst
    width = cnst
    var_x = K.clip(K.abs(y_true - y_pred) - 0.5 * cnst, 0.0, 1.0)
    loss = height*(K.tanh(var_a*((var_x/width)-tf.floor(var_x/width)-0.5)) /
                   (2.0*K.tanh(var_a/2.0)) + 0.5 + tf.floor(var_x/width))
    loss += 1e-10
    return K.mean(loss, axis=-1)
Developer: stonezuohui, Project: faceswap, Lines: 9, Source: losses.py
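
In this loss, tf.floor(var_x / width) counts how many full 1/255-sized steps the pixel error spans, while the scaled tanh term supplies a smooth transition within each step, so the expression behaves like the absolute error quantized to 1/255 steps yet stays differentiable. A rough NumPy mirror of the forward computation for a single error value (my own sketch, not taken from the faceswap repository):

import numpy as np

def staircase_np(err, var_a=16.0, cnst=1.0 / 255.0):
    # mirrors the Keras staircase loss above for a scalar absolute error
    var_x = np.clip(np.abs(err) - 0.5 * cnst, 0.0, 1.0)
    steps = np.floor(var_x / cnst)   # number of full 1/255 steps
    frac = var_x / cnst - steps      # position within the current step
    smooth = np.tanh(var_a * (frac - 0.5)) / (2.0 * np.tanh(var_a / 2.0)) + 0.5
    return cnst * (smooth + steps) + 1e-10

for e in (0.0, 1.0 / 255.0, 2.5 / 255.0):
    print(e, staircase_np(e))  # roughly 0, cnst/2 and 2*cnst respectively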


Example 5: get_output_shape_tensor

 def get_output_shape_tensor(self, flatten=None):
     if flatten == None:
         flatten = self.flatten
     with tf.name_scope(self.layer_name):
         if self.conv_padding.lower() == 'same':
             if self.pool:
                 if self.pool_type.lower() == 'same':
                     out_shape = (self.input_shape[0],
                                  tf.to_int32(tf.ceil(tf.ceil(tf.to_float(self.input_shape[1]) /
                                                              self.conv_stride[1]) / self.pool_size[0])),
                                  tf.to_int32(tf.ceil(tf.ceil(tf.to_float(self.input_shape[2])) /
                                                      self.conv_stride[2]) / self.pool_size[1]),
                                  self.filter_shape[3])
                 elif self.pool_type.lower() == 'valid':
                     out_shape = (self.input_shape[0],
                                  tf.to_int32(tf.floor(tf.ceil(tf.to_float(self.input_shape[1]) /
                                                               self.conv_stride[1]) / self.pool_size[0])),
                                  tf.to_int32(
                                      tf.floor(tf.to_float(tf.ceil(tf.to_float(self.input_shape[2])) /
                                                           self.conv_stride[2]) / self.pool_size[1])),
                                  self.filter_shape[3])
             else:
                 out_shape = (self.input_shape[0],
                              tf.to_int32(tf.ceil(tf.to_float(self.input_shape[1]) / self.conv_stride[1])),
                              tf.to_int32(tf.ceil(tf.to_float(self.input_shape[2])) / self.conv_stride[2]),
                              self.filter_shape[3])
         elif self.conv_padding.lower() == 'valid':
             if self.pool:
                 if self.pool_type.lower() == 'same':
                     out_shape = (self.input_shape[0],
                                  tf.to_int32(tf.ceil(np.ceil(
                                      tf.to_float(self.input_shape[1] - self.filter_shape[0] + 1) /
                                      self.conv_stride[1])) / self.pool_size[0]),
                                  tf.to_int32(tf.ceil(np.ceil(
                                      tf.to_float(self.input_shape[2] - self.filter_shape[1] + 1) /
                                      self.conv_stride[2])) / self.pool_size[1]),
                                  self.filter_shape[3])
                 elif self.pool_type.lower() == 'valid':
                     out_shape = (self.input_shape[0],
                                  tf.to_int32(tf.floor(np.ceil(
                                      tf.to_float(self.input_shape[1] - self.filter_shape[0] + 1) /
                                      self.conv_stride[1])) / self.pool_size[0]),
                                  tf.to_int32(tf.floor(np.ceil(
                                      tf.to_float(self.input_shape[2] - self.filter_shape[1] + 1) /
                                      self.conv_stride[2])) / self.pool_size[1]),
                                  self.filter_shape[3])
             else:
                 out_shape = (self.input_shape[0],
                              tf.to_int32(
                                  tf.ceil(tf.to_float(self.input_shape[1] - self.filter_shape[0] + 1) /
                                          self.conv_stride[1])),
                              tf.to_int32(
                                  tf.ceil(tf.to_float(self.input_shape[2] - self.filter_shape[1] + 1) /
                                          self.conv_stride[2])),
                              self.filter_shape[3])
     return (out_shape[0], out_shape[1] * out_shape[2] * out_shape[3]) if flatten else out_shape
Developer: savourylie, Project: fucos-tensorflow, Lines: 56, Source: layers.py


Example 6: sample_img

def sample_img(img, n_samples):
    sx = tf.random_uniform((n_samples,), 0, 1) * 27
    sy = tf.random_uniform((n_samples,), 0, 1) * 27
    sx_lower = tf.cast(tf.floor(sx), tf.int32)
    sx_upper = tf.cast(tf.ceil(sx), tf.int32)

    sy_lower = tf.cast(tf.floor(sy), tf.int32)
    sy_upper = tf.cast(tf.ceil(sy), tf.int32)

    sx_nearest = tf.cast(tf.round(sx), tf.int32)
    sy_nearest = tf.cast(tf.round(sy), tf.int32)
    inds = tf.pack([sx_nearest, sy_nearest])
    samples = tf.gather(tf.reshape(img, (-1,)), sx_nearest + sy_nearest*28)
    return sx/27, sy/27, samples
Developer: lukemetz, Project: cppn, Lines: 14, Source: model.py


Example 7: _log_unnormalized_prob

 def _log_unnormalized_prob(self, x):
   safe_x = tf.maximum(x if self.interpolate_nondiscrete else tf.floor(x), 1.)
   y = -self.power * tf.log(safe_x)
   is_supported = tf.broadcast_to(tf.equal(x, safe_x), tf.shape(y))
   neg_inf = tf.fill(
       tf.shape(y), value=np.array(-np.inf, dtype=y.dtype.as_numpy_dtype))
   return tf.where(is_supported, y, neg_inf)
Developer: asudomoeva, Project: probability, Lines: 7, Source: zipf.py


Example 8: dropout_sparse

def dropout_sparse(x, keep_prob, num_nonzero_elems):
    noise_shape = [num_nonzero_elems]
    random_tensor = keep_prob
    random_tensor += tf.random_uniform(noise_shape)
    dropout_mask = tf.cast(tf.floor(random_tensor), dtype=tf.bool)
    pre_out = tf.sparse_retain(x, dropout_mask)
    return pre_out * (1./keep_prob)
Developer: burakbayramli, Project: classnotes, Lines: 7, Source: util.py
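
Examples 8, 9, 17 and 18 all rely on the same trick: adding keep_prob to a uniform sample in [0, 1) and flooring the result gives 1.0 with probability keep_prob and 0.0 otherwise, i.e. a Bernoulli(keep_prob) dropout mask built from tf.floor alone. A minimal sanity check of the idea (assuming TensorFlow 1.x, like the example above):

import tensorflow as tf  # assumes TensorFlow 1.x

keep_prob = 0.8
u = tf.random_uniform([100000])   # samples from U[0, 1)
mask = tf.floor(keep_prob + u)    # 1.0 with probability keep_prob, else 0.0

with tf.Session() as sess:
    print(sess.run(tf.reduce_mean(mask)))  # close to 0.8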


Example 9: generate_dropout_masks

def generate_dropout_masks(keep_prob, shape, amount):
  masks = []
  for _ in range(amount):
    dropout_mask = tf.random_uniform(shape) + (keep_prob)
    dropout_mask = tf.floor(dropout_mask) / (keep_prob)
    masks.append(dropout_mask)
  return masks
Developer: ALISCIFP, Project: models, Lines: 7, Source: variational_dropout.py


Example 10: count_sketch

def count_sketch(probs, project_size):
    """ Calculates count-min sketch of a tensor.
    Args:
      probs: A `Tensor`
      project_size: output size (`int`)

    Returns:
      A projected count-min sketch `Tensor` with shape [batch_size, project_size].
    """
    with tf.variable_scope('CountSketch_'+probs.name.replace(':', '_')) as scope:
        input_size = int(probs.get_shape()[1])

        # h, s must be sampled once
        history = tf.get_collection('__countsketch')
        if scope.name in history: scope.reuse_variables()
        tf.add_to_collection('__countsketch', scope.name)

        h = tf.get_variable('h', [input_size], initializer=tf.random_uniform_initializer(0, project_size), trainable=False)
        s = tf.get_variable('s', [input_size], initializer=tf.random_uniform_initializer(0, 2), trainable=False)

        h = tf.cast(h, 'int32')
        s = tf.cast(tf.floor(s) * 2 - 1, 'int32') # 1 or -1

        sk = _sketch_op.count_sketch(probs, h, s, project_size)
        sk.set_shape([probs.get_shape()[0], project_size])
        return sk
Developer: ml-ai-nlp-ir, Project: compact-bilinear-pooling-tf, Lines: 26, Source: count_sketch.py


Example 11: rnn_decoder

def rnn_decoder(cell, inputs, initial_state, embedding_size, embedding_length, sequence_length,
                name='RNNDecoder', reuse=False, use_inputs_prob=0.0, static_input=None):
    with tf.variable_scope(name, reuse=reuse):
        # print(tf.get_variable_scope().reuse, tf.get_variable_scope().name)
        with tf.name_scope("embedding"):
            batch_size = tf.shape(initial_state)[0]
            embedding_table = tf.get_variable(
                name='embedding_table',
                shape=[embedding_length, embedding_size],
                initializer=tf.truncated_normal_initializer(stddev=glorot_mul(embedding_length, embedding_size)),
            )
            # 0 is index for _SOS_ (start of sentence symbol)
            initial_embedding = tf.gather(embedding_table, tf.zeros(tf.pack([batch_size]), tf.int32))

        states = [initial_state]
        outputs = []
        outputs_softmax = []
        decoder_outputs_argmax_embedding = []

        for j in range(sequence_length):
            with tf.variable_scope(tf.get_variable_scope(), reuse=True if j > 0 else None):
                # get input :
                #   either feedback the previous decoder argmax output
                #   or use the provided input (note that you have to use the previous input; the index is therefore -1)
                input = initial_embedding
                if j > 0:
                    true_input = tf.gather(embedding_table, inputs[j - 1])
                    decoded_input = decoder_outputs_argmax_embedding[-1]
                    choice = tf.floor(tf.random_uniform([1], use_inputs_prob, 1 + use_inputs_prob, tf.float32))
                    input = choice * true_input + (1.0 - choice) * decoded_input

                if static_input:
                    input = tf.concat(1, [input, static_input])

                # print(tf.get_variable_scope().reuse, tf.get_variable_scope().name)
                output, state = cell(input, states[-1])

                projection = linear(
                    input=output,
                    input_size=cell.output_size,
                    output_size=embedding_length,
                    name='output_linear_projection'
                )

                outputs.append(projection)
                states.append(state)

                softmax = tf.nn.softmax(projection, name="output_softmax")
                # we do not compute the gradient through argmax
                output_argmax = tf.stop_gradient(tf.argmax(softmax, 1))
                # we do not compute the gradient for embeddings when used with noisy argmax outputs
                output_argmax_embedding = tf.stop_gradient(tf.gather(embedding_table, output_argmax))
                decoder_outputs_argmax_embedding.append(output_argmax_embedding)

                outputs_softmax.append(tf.expand_dims(softmax, 1))

    # remove the initial state
    states = states[1:]

    return states, outputs, outputs_softmax
Developer: jurcicek, Project: ndm, Lines: 60, Source: bricks.py


Example 12: _cdf

  def _cdf(self, y):
    low = self._low
    high = self._high

    # Recall the promise:
    # cdf(y) := P[Y <= y]
    #         = 1, if y >= high,
    #         = 0, if y < low,
    #         = P[X <= y], otherwise.

    # P[Y <= j] = P[floor(Y) <= j] since mass is only at integers, not in
    # between.
    j = tf.floor(y)

    # P[X <= j], used when low < X < high.
    result_so_far = self.distribution.cdf(j)

    # Broadcast, because it's possible that this is a single distribution being
    # evaluated on a number of samples, or something like that.
    j += tf.zeros_like(result_so_far)

    # Re-define values at the cutoffs.
    if low is not None:
      result_so_far = tf.where(j < low, tf.zeros_like(result_so_far),
                               result_so_far)
    if high is not None:
      result_so_far = tf.where(j >= high, tf.ones_like(result_so_far),
                               result_so_far)

    return result_so_far
Developer: lewisKit, Project: probability, Lines: 30, Source: quantized_distribution.py


Example 13: fpn_map_rois_to_levels

def fpn_map_rois_to_levels(boxes):
    """
    Assign boxes to level 2~5.

    Args:
        boxes (nx4):

    Returns:
        [tf.Tensor]: 4 tensors for level 2-5. Each tensor is a vector of indices of boxes in its level.
        [tf.Tensor]: 4 tensors, the gathered boxes in each level.

    Be careful that the returned tensor could be empty.
    """
    sqrtarea = tf.sqrt(tf_area(boxes))
    level = tf.to_int32(tf.floor(
        4 + tf.log(sqrtarea * (1. / 224) + 1e-6) * (1.0 / np.log(2))))

    # RoI levels range from 2~5 (not 6)
    level_ids = [
        tf.where(level <= 2),
        tf.where(tf.equal(level, 3)),   # == is not supported
        tf.where(tf.equal(level, 4)),
        tf.where(level >= 5)]
    level_ids = [tf.reshape(x, [-1], name='roi_level{}_id'.format(i + 2))
                 for i, x in enumerate(level_ids)]
    num_in_levels = [tf.size(x, name='num_roi_level{}'.format(i + 2))
                     for i, x in enumerate(level_ids)]
    add_moving_summary(*num_in_levels)

    level_boxes = [tf.gather(boxes, ids) for ids in level_ids]
    return level_ids, level_boxes
Developer: tobyma, Project: tensorpack, Lines: 31, Source: model.py
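
The level computation above is the standard FPN heuristic floor(4 + log2(sqrt(area) / 224)): a box with a 224-pixel side maps to level 4, and every halving or doubling of the side moves it one level down or up (before clamping to levels 2 through 5). A quick NumPy check of the formula (my own sketch, mirroring the TF expression above):

import numpy as np

def fpn_level(w, h, k0=4, canonical=224.0):
    # mirrors floor(4 + log(sqrt(area) * (1/224) + 1e-6) / log(2)) from the example above
    return int(np.floor(k0 + np.log2(np.sqrt(w * h) / canonical + 1e-6)))

print(fpn_level(224, 224))  # 4
print(fpn_level(112, 112))  # 3
print(fpn_level(448, 448))  # 5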


Example 14: loop_body

    def loop_body(should_continue, k):
      """Resample the non-accepted points."""
      # The range of U is chosen so that the resulting sample K lies in
      # [0, tf.int64.max). The final sample, if accepted, is K + 1.
      u = tf.random_uniform(
          shape,
          minval=minval_u,
          maxval=maxval_u,
          dtype=self.power.dtype,
          seed=seed())

      # Sample the point X from the continuous density h(x) \propto x^(-power).
      x = self._hat_integral_inverse(u)

      # Rejection-inversion requires a `hat` function, h(x) such that
      # \int_{k - .5}^{k + .5} h(x) dx >= pmf(k + 1) for points k in the
      # support. A natural hat function for us is h(x) = x^(-power).
      #
      # After sampling X from h(x), suppose it lies in the interval
      # (K - .5, K + .5) for integer K. Then the corresponding K is accepted if
      # if lies to the left of x_K, where x_K is defined by:
      #   \int_{x_k}^{K + .5} h(x) dx = H(x_K) - H(K + .5) = pmf(K + 1),
      # where H(x) = \int_x^inf h(x) dx.

      # Solving for x_K, we find that x_K = H_inverse(H(K + .5) + pmf(K + 1)).
      # Or, the acceptance condition is X <= H_inverse(H(K + .5) + pmf(K + 1)).
      # Since X = H_inverse(U), this simplifies to U <= H(K + .5) + pmf(K + 1).

      # Update the non-accepted points.
      # Since X \in (K - .5, K + .5), the sample K is chosen as floor(X + 0.5).
      k = tf.where(should_continue, tf.floor(x + 0.5), k)
      accept = (u <= self._hat_integral(k + .5) + tf.exp(self._log_prob(k + 1)))

      return [should_continue & (~accept), k]
Developer: asudomoeva, Project: probability, Lines: 34, Source: zipf.py


Example 15: quantize

def quantize(t, quant_scale, max_value=1.0):
  """Quantize a tensor t with each element in [-max_value, max_value]."""
  t = tf.minimum(max_value, tf.maximum(t, -max_value))
  big = quant_scale * (t + max_value) + 0.5
  with tf.get_default_graph().gradient_override_map({"Floor": "CustomIdG"}):
    res = (tf.floor(big) / quant_scale) - max_value
  return res
Developer: Jmq14, Project: models, Lines: 7, Source: neural_gpu.py
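
Here tf.floor performs the rounding step of the quantizer: big rescales the clipped input onto an integer grid, flooring snaps it to that grid, and the final division maps the result back to [-max_value, max_value]. The gradient_override_map call replaces the zero gradient of Floor with a custom op named "CustomIdG", presumably an identity (a straight-through estimator); that registration lives elsewhere in the repository. A NumPy mirror of the forward pass only (my own sketch, without the gradient override):

import numpy as np

def quantize_np(t, quant_scale, max_value=1.0):
    # forward pass of quantize() above: clip, rescale, floor, rescale back
    t = np.clip(t, -max_value, max_value)
    big = quant_scale * (t + max_value) + 0.5
    return np.floor(big) / quant_scale - max_value

print(quantize_np(np.array([0.3, -0.7, 1.5]), quant_scale=127.5))
# approximately [ 0.302 -0.702  1.   ]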


Example 16: test_ImageSample

    def test_ImageSample(self):
        import numpy as np
        h, w = 3, 4

        def np_sample(img, coords):
            # a reference implementation
            coords = np.maximum(coords, 0)
            coords = np.minimum(coords,
                                np.array([img.shape[1] - 1, img.shape[2] - 1]))
            xs = coords[:, :, :, 1].reshape((img.shape[0], -1))
            ys = coords[:, :, :, 0].reshape((img.shape[0], -1))

            ret = np.zeros((img.shape[0], coords.shape[1], coords.shape[2],
                            img.shape[3]), dtype='float32')
            for k in range(img.shape[0]):
                xss, yss = xs[k], ys[k]
                ret[k, :, :, :] = img[k, yss, xss, :].reshape((coords.shape[1],
                                                               coords.shape[2], 3))
            return ret

        bimg = np.random.rand(2, h, w, 3).astype('float32')

        # mat = np.array([
        # [[[1,1], [1.2,1.2]], [[-1, -1], [2.5, 2.5]]],
        # [[[1,1], [1.2,1.2]], [[-1, -1], [2.5, 2.5]]]
        # ], dtype='float32')  #2x2x2x2
        mat = (np.random.rand(2, 5, 5, 2) - 0.2) * np.array([h + 3, w + 3])
        true_res = np_sample(bimg, np.floor(mat + 0.5).astype('int32'))

        inp, mapping = self.make_variable(bimg, mat)
        output = sample(inp, tf.cast(tf.floor(mapping + 0.5), tf.int32))
        res = self.run_variable(output)

        self.assertTrue((res == true_res).all())
Developer: j50888, Project: tensorpack, Lines: 34, Source: image_sample.py


Example 17: sparse_dropout

def sparse_dropout(x, keep_prob, noise_shape):
    """Dropout for sparse tensors."""
    random_tensor = keep_prob
    random_tensor += tf.random_uniform(noise_shape)
    dropout_mask = tf.cast(tf.floor(random_tensor), dtype=tf.bool)
    pre_out = tf.sparse_retain(x, dropout_mask)
    return pre_out * (1./keep_prob)
Developer: Eilene, Project: gcn, Lines: 7, Source: layers.py


Example 18: sample

def sample(probabilities):
    '''
    Sample a tensor based on the probabilities
    :param probabilities: A tensor of probabilities given by 'restricted_boltzman_machine.get_probabilities'
    :return: A sampled tensor
    '''
    return tf.floor(probabilities + tf.random_uniform(tf.shape(probabilities), 0, 1))
Developer: AdamStefan, Project: DeepLearning, Lines: 7, Source: rbm.py


Example 19: ImageSample

def ImageSample(inputs, borderMode='repeat'):
    """
    Sample the images using the given coordinates, by bilinear interpolation.
    This was described in the paper:
    `Spatial Transformer Networks <http://arxiv.org/abs/1506.02025>`_.

    This is equivalent to `torch.nn.functional.grid_sample`,
    up to some non-trivial coordinate transformation.

    This implementation returns pixel value at pixel (1, 1) for a floating point coordinate (1.0, 1.0).
    Note that this may not be what you need.

    Args:
        inputs (list): [images, coords]. images has shape NHWC.
            coords has shape (N, H', W', 2), where each pair of the last dimension is a (y, x) real-value
            coordinate.
        borderMode: either "repeat" or "constant" (zero-filled)

    Returns:
        tf.Tensor: a tensor named ``output`` of shape (N, H', W', C).
    """
    log_deprecated("ImageSample", "Please implement it in your own code instead!", "2018-12-01")
    image, mapping = inputs
    assert image.get_shape().ndims == 4 and mapping.get_shape().ndims == 4
    input_shape = image.get_shape().as_list()[1:]
    assert None not in input_shape, \
        "Images in ImageSample layer must have fully-defined shape"
    assert borderMode in ['repeat', 'constant']

    orig_mapping = mapping
    mapping = tf.maximum(mapping, 0.0)
    lcoor = tf.floor(mapping)
    ucoor = lcoor + 1

    diff = mapping - lcoor
    neg_diff = 1.0 - diff  # bxh2xw2x2

    lcoory, lcoorx = tf.split(lcoor, 2, 3)
    ucoory, ucoorx = tf.split(ucoor, 2, 3)

    lyux = tf.concat([lcoory, ucoorx], 3)
    uylx = tf.concat([ucoory, lcoorx], 3)

    diffy, diffx = tf.split(diff, 2, 3)
    neg_diffy, neg_diffx = tf.split(neg_diff, 2, 3)

    ret = tf.add_n([sample(image, lcoor) * neg_diffx * neg_diffy,
                    sample(image, ucoor) * diffx * diffy,
                    sample(image, lyux) * neg_diffy * diffx,
                    sample(image, uylx) * diffy * neg_diffx], name='sampled')
    if borderMode == 'constant':
        max_coor = tf.constant([input_shape[0] - 1, input_shape[1] - 1], dtype=tf.float32)
        mask = tf.greater_equal(orig_mapping, 0.0)
        mask2 = tf.less_equal(orig_mapping, max_coor)
        mask = tf.logical_and(mask, mask2)  # bxh2xw2x2
        mask = tf.reduce_all(mask, [3])  # bxh2xw2 boolean
        mask = tf.expand_dims(mask, 3)
        ret = ret * tf.cast(mask, tf.float32)
    return tf.identity(ret, name='output')
Developer: quanlzheng, Project: tensorpack, Lines: 59, Source: image_sample.py


Example 20: imageWarpIm

def imageWarpIm(imageBatch,pMtrxBatch,opt,name=None):
	with tf.name_scope("ImWarp"):
		imageBatch = tf.expand_dims(imageBatch,-1)
		batchSize = tf.shape(imageBatch)[0]
		imageH,imageW = opt.H,opt.H
		H,W = opt.H,opt.W
		warpGTmtrxBatch = tf.tile(tf.expand_dims(opt.warpGTmtrx,0),[batchSize,1,1])
		transMtrxBatch = tf.matmul(warpGTmtrxBatch,pMtrxBatch)
		# warp the canonical coordinates
		X,Y = np.meshgrid(np.linspace(-1,1,W),np.linspace(-1,1,H))
		XYhom = tf.transpose(tf.stack([X.reshape([-1]),Y.reshape([-1]),np.ones([X.size])],axis=1))
		XYhomBatch = tf.tile(tf.expand_dims(XYhom,0),[batchSize,1,1])
		XYwarpHomBatch = tf.matmul(transMtrxBatch,tf.to_float(XYhomBatch))
		XwarpHom,YwarpHom,ZwarpHom = tf.split(XYwarpHomBatch,3,1)
		Xwarp = tf.reshape(XwarpHom/ZwarpHom,[batchSize,H,W])
		Ywarp = tf.reshape(YwarpHom/ZwarpHom,[batchSize,H,W])
		# get the integer sampling coordinates
		Xfloor,Xceil = tf.floor(Xwarp),tf.ceil(Xwarp)
		Yfloor,Yceil = tf.floor(Ywarp),tf.ceil(Ywarp)
		XfloorInt,XceilInt = tf.to_int32(Xfloor),tf.to_int32(Xceil)
		YfloorInt,YceilInt = tf.to_int32(Yfloor),tf.to_int32(Yceil)
		imageIdx = tf.tile(tf.reshape(tf.range(batchSize),[batchSize,1,1]),[1,H,W])
		imageVec = tf.reshape(imageBatch,[-1,tf.shape(imageBatch)[3]])
		imageVecOutside = tf.concat([imageVec,tf.zeros([1,tf.shape(imageBatch)[3]])],0)
		idxUL = (imageIdx*imageH+YfloorInt)*imageW+XfloorInt
		idxUR = (imageIdx*imageH+YfloorInt)*imageW+XceilInt
		idxBL = (imageIdx*imageH+YceilInt)*imageW+XfloorInt
		idxBR = (imageIdx*imageH+YceilInt)*imageW+XceilInt
		idxOutside = tf.fill([batchSize,H,W],batchSize*imageH*imageW)
		def insideIm(Xint,Yint):
			return (Xint>=0)&(Xint<imageW)&(Yint>=0)&(Yint<imageH)
		idxUL = tf.where(insideIm(XfloorInt,YfloorInt),idxUL,idxOutside)
		idxUR = tf.where(insideIm(XceilInt,YfloorInt),idxUR,idxOutside)
		idxBL = tf.where(insideIm(XfloorInt,YceilInt),idxBL,idxOutside)
		idxBR = tf.where(insideIm(XceilInt,YceilInt),idxBR,idxOutside)
		# bilinear interpolation
		Xratio = tf.reshape(Xwarp-Xfloor,[batchSize,H,W,1])
		Yratio = tf.reshape(Ywarp-Yfloor,[batchSize,H,W,1])
		ImUL = tf.to_float(tf.gather(imageVecOutside,idxUL))*(1-Xratio)*(1-Yratio)
		ImUR = tf.to_float(tf.gather(imageVecOutside,idxUR))*(Xratio)*(1-Yratio)
		ImBL = tf.to_float(tf.gather(imageVecOutside,idxBL))*(1-Xratio)*(Yratio)
		ImBR = tf.to_float(tf.gather(imageVecOutside,idxBR))*(Xratio)*(Yratio)
		ImWarpBatch = ImUL+ImUR+ImBL+ImBR
		ImWarpBatch = tf.identity(ImWarpBatch,name=name)
	return ImWarpBatch
Developer: sunshinezhe, Project: IC-STN, Lines: 45, Source: data.py



Note: The tensorflow.floor examples in this article were compiled by 纯净天空 from GitHub, MSDocs and other source code and documentation platforms. The code snippets were selected from open-source projects contributed by various developers, and copyright remains with the original authors. Please consult the corresponding project's license before distributing or using the code; do not reproduce this article without permission.

