
Python tensor.ceil Function Code Examples


This article collects typical usage examples of the theano.tensor.ceil function in Python. If you have been wondering what exactly ceil does, how to call it, or what it looks like in real code, the hand-picked examples below may help.



The following presents 20 code examples of the ceil function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help our system recommend better Python code examples.
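Before the project snippets, a minimal sketch of the function itself may help (this is not from any project below; it assumes a working Theano install): T.ceil builds a symbolic elementwise op that rounds each element up to the nearest integer.

import numpy as np
import theano
import theano.tensor as T

x = T.dvector('x')                    # symbolic double-precision vector
f = theano.function([x], T.ceil(x))   # compile the elementwise ceiling
print(f(np.array([0.2, 1.0, -1.7])))  # -> [ 1.  1. -1.]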

Example 1: _activation

    def _activation(self, Y, L, M, W):
        """Returns the activation for a given input.

        Derived from the generative model formulation of hierarchical
        Poisson mixtures, the formula for the activation in the network
        reads as follows:
        I_c =
         \sum_d \log(W_{cd})y_d + \log(M_{lc})        for labeled data
         \sum_d \log(W_{cd})y_d + \log(\sum_k M_{kc}) for unlabeled data
        s_c = softmax(I_c)
        """
        # first: complete inference to find label
        # Input integration:
        I = T.tensordot(Y,T.log(W),axes=[1,1])
        # recurrent term:
        vM = M[L]
        L_index = T.eq(L,-1).nonzero()
        vM = T.set_subtensor(vM[L_index], T.sum(M, axis=0))
        # numeric trick to prevent overflow in the exp-function
        max_exponent = 86. - T.ceil(T.log(I.shape[1].astype('float32')))
        scale = T.switch(
            T.gt(T.max(I, axis=1, keepdims=True), max_exponent),
            T.max(I, axis=1, keepdims=True) - max_exponent,
            0.)
        # numeric approximation to prevent underflow in the exp-function:
        # map too low values of I to a fixed minimum value
        min_exponent = -87. + T.ceil(T.log(I.shape[1].astype('float32')))
        I = T.switch(
            T.lt(I-scale, min_exponent),
            scale+min_exponent,
            I)
        # activation: recurrent softmax with overflow protection
        s = vM*T.exp(I-scale)/T.sum(vM*T.exp(I-scale), axis=1, keepdims=True)
        return s
Developer: smajida | Project: NeSi | Lines of code: 34 | Source: poisson_theano_scan.py
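The max_exponent/scale guard above is easy to verify numerically. Below is a small numpy sketch (not from the project; the values are made up) showing that subtracting a shift from all exponents leaves the softmax unchanged while keeping exp() within float32 range:

import numpy as np

I = np.array([100., 101., 102.])  # exponents this large overflow float32 exp()
scale = I.max() - 86.             # cap the largest exponent at 86, as in the code above
s = np.exp(I - scale) / np.sum(np.exp(I - scale))
print(s)                          # [0.09003057 0.24472847 0.66524096] == softmax(I)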


Example 2: process

 def process(self, input, tparams, BNparams):
     b, f, h0, w0 = input.shape
     result = []
     for h, w in self.pymamid:
         win_h = T.ceil(h0 / h).astype('int32')
         win_w = T.ceil(w0 / w).astype('int32')
         str_h = T.floor(h0 / h).astype('int32')
         str_w = T.floor(w0 / w).astype('int32')
         result.append(dnn_pool(
             img=input, ws=(win_h, win_w), mode=self.mode,
             stride=(str_h, str_w), pad=(0, 0)).reshape([b, -1]))
     return T.concatenate(result, axis=1)
Developer: wufangjie | Project: dnn | Lines of code: 12 | Source: layers.py
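As a worked example of the ceil/floor pairing above (a plain-numpy sketch with assumed sizes): pooling a 7-pixel axis into 3 bins uses window ceil(7/3) = 3 and stride floor(7/3) = 2, so the windows tile the whole axis:

import numpy as np

h0, h = 7.0, 3.0                 # input size and target bin count (assumed)
win = int(np.ceil(h0 / h))       # window size: 3
stride = int(np.floor(h0 / h))   # stride: 2
print(win, stride)               # 3 2 -> windows cover [0:3], [2:5], [4:7]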


Example 3: pool_2d_nxn_regions

def pool_2d_nxn_regions(inputs, output_size, mode='max'):
    """
    Performs a pooling operation that results in a fixed size:
    output_size x output_size.
    Used by SpatialPyramidPoolingLayer. Refer to appendix A in [1]

    Parameters
    ----------
    inputs : a tensor with 4 dimensions (N x C x H x W)
    output_size: integer
        The output size of the pooling operation
    mode : string
        Pooling mode, one of 'max', 'average_inc_pad', 'average_exc_pad'
        Defaults to 'max'.

    Returns a list of tensors, for each output bin.
       The list contains output_size*output_size elements, where
       each element is a 3D tensor (N x C x 1)

    References
    ----------
    .. [1] He, Kaiming et al (2015):
           Spatial Pyramid Pooling in Deep Convolutional Networks
           for Visual Recognition.
           http://arxiv.org/pdf/1406.4729.pdf.
    """

    if mode == 'max':
        pooling_op = T.max
    elif mode in ['average_inc_pad', 'average_exc_pad']:
        pooling_op = T.mean
    else:
        msg = "Mode must be either 'max', 'average_inc_pad' or "
        msg += "'average_exc_pad'. Got '{0}'"
        raise ValueError(msg.format(mode))

    h, w = inputs.shape[2:]

    result = []
    n = float(output_size)

    for row in range(output_size):
        for col in range(output_size):
            start_h = T.floor(row / n * h).astype('int32')
            end_h = T.ceil((row + 1) / n * h).astype('int32')
            start_w = T.floor(col / n * w).astype('int32')
            end_w = T.ceil((col + 1) / n * w).astype('int32')

            pooling_region = inputs[:, :, start_h:end_h, start_w:end_w]
            this_result = pooling_op(pooling_region, axis=(2, 3))
            result.append(this_result.dimshuffle(0, 1, 'x'))
    return result
Developer: HapeMask | Project: Lasagne | Lines of code: 52 | Source: pool.py
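A hypothetical usage sketch (assuming pool_2d_nxn_regions above is in scope, Theano is installed, and floatX is left at float64): with output_size=2 the function returns four N x C x 1 bins, which concatenate into N x C x 4:

import numpy as np
import theano
import theano.tensor as T

x = T.tensor4('x')                          # N x C x H x W
bins = pool_2d_nxn_regions(x, output_size=2, mode='max')
out = T.concatenate(bins, axis=2)           # N x C x 4
f = theano.function([x], out)
print(f(np.random.rand(1, 3, 7, 9)).shape)  # (1, 3, 4)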


Example 4: compute_hard_windows

    def compute_hard_windows(self, image_shape, location, scale):
        # find topleft(front) and bottomright(back) corners for each patch
        a = location - 0.5 * (T.cast(self.patch_shape, theano.config.floatX) / scale)
        b = location + 0.5 * (T.cast(self.patch_shape, theano.config.floatX) / scale)

        # grow by three patch pixels
        a -= self.kernel.k_sigma_radius(self.cutoff, scale)
        b += self.kernel.k_sigma_radius(self.cutoff, scale)

        # clip to fit inside image and have nonempty window
        a = T.clip(a, 0, image_shape - 1)
        b = T.clip(b, a + 1, image_shape)

        if self.batched_window:
            # take the bounding box of all windows; now the slices
            # will have the same length for each sample and scan can
            # be avoided.  comes at the cost of typically selecting
            # more of the input.
            a = a.min(axis=0, keepdims=True)
            b = b.max(axis=0, keepdims=True)

        # make integer
        a = T.cast(T.floor(a), 'int16')
        b = T.cast(T.ceil(b), 'int16')

        return a, b
Developer: mohammadpz | Project: rna | Lines of code: 26 | Source: crop.py
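The floor/ceil pair at the end guarantees that the integer window fully covers the continuous one; a quick numpy illustration with made-up corners:

import numpy as np

a = np.array([2.3, 0.7])            # continuous topleft corner
b = np.array([5.1, 3.9])            # continuous bottomright corner
ai = np.floor(a).astype('int16')    # round the start down
bi = np.ceil(b).astype('int16')     # round the end up
print(ai, bi)                       # [2 0] [6 4] -> slice [2:6, 0:4] contains the window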


Example 5: __init__

    def __init__(self, input_ngram, input_sm, vocab_size, emb_dim, num_section, linear_W_emb=None, fix_emb=False, nonlinear=None, activation=None):
        
        global rng
        global init_range
        if linear_W_emb is None:
            # random initialize
            linear_W_emb = np.asarray(rng.uniform(
                low=-init_range, high=init_range, size=(vocab_size, emb_dim)), dtype=theano.config.floatX)
        else:
            # use the given model parameter
            given_vocab_size, given_emb_dim = linear_W_emb.shape
            assert(given_vocab_size == vocab_size and given_emb_dim == emb_dim)

        # shared variables
        self.W_emb = theano.shared(value=linear_W_emb, name='W_emb')

        # stack vectors
        input_ngram = T.cast(input_ngram, 'int32')
        input_sm = T.cast(input_sm, 'int32')

        # output is a matrix where each row corresponds to a context_size embedding vector, and the number of rows equals the batch size
        # output dimensions: batch_size * ((context_size + 1) * emb_dim)
        output_local = self.W_emb[input_ngram[:, :-1].flatten()].reshape(
            (input_ngram.shape[0], emb_dim * (input_ngram.shape[1] - 1)))  # self.W_emb.shape[1]
        
        sentence_lengths = input_sm[:,0]
        sentence_matrix = input_sm[:,1:]

        sentence_num = sentence_matrix.shape[0]
        global_length = sentence_matrix.shape[1]
        section_length = T.cast(T.ceil(global_length / float(num_section)), 'int32')

        # For the first section
        sentence_embeddings = T.mean(self.W_emb[sentence_matrix[:, :section_length].flatten()].reshape(
            (sentence_num, section_length, emb_dim)), axis=1)

        # For the remaining sections
        for i in xrange(1, num_section):
            current_section = T.mean(self.W_emb[sentence_matrix[:, i*section_length:(i+1)*section_length].flatten()].reshape(
                (sentence_num, section_length, emb_dim)), axis=1)
            sentence_embeddings = T.concatenate([sentence_embeddings, current_section], axis=1)

        # get the sentence index for each ngram vector, and transform it to 0-based
        sentence_indeces = input_ngram[:,-1]
        base_index = sentence_indeces[0]
        sentence_indeces = sentence_indeces - base_index

        # the last column of output should be a weighted sum of the sentence
        # vectors
        output_global = sentence_embeddings[sentence_indeces.flatten()].reshape((sentence_indeces.shape[0], emb_dim * num_section))

        # handle non-linear layer
        if nonlinear is None or activation is None:
            self.output = T.concatenate([output_local, output_global], axis=1)
            # params is the word embedding matrix
            self.params = [self.W_emb] if not fix_emb else []
        else:
            self.non_linear_params, non_linear_output_global = addNonlinearLayer(output_global, emb_dim * num_section, nonlinear, activation)
            self.output = T.concatenate([output_local, non_linear_output_global], axis=1)
            self.params = [self.W_emb] + self.non_linear_params if not fix_emb else self.non_linear_params
Developer: lixiangnlp | Project: nnjm-global | Lines of code: 60 | Source: model_util.py
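The ceil in section_length rounds up so that num_section slices cover every column of the sentence matrix (the last slice may be shorter). A small numeric sketch with assumed sizes:

import numpy as np

global_length, num_section = 10, 3   # assumed sizes
section_length = int(np.ceil(global_length / float(num_section)))
print(section_length)                # 4 -> sections cover columns 0:4, 4:8, 8:10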


Example 6: spp_max_pool_axis_kwargs

def spp_max_pool_axis_kwargs(in_shape, out_shape):
    symbolic = (treeano.utils.is_variable(in_shape)
                or treeano.utils.is_variable(out_shape))
    # max pooling requires a static shape, so symbolic shapes are rejected
    # (note: given this assert, the symbolic branch below is unreachable as written)
    assert not symbolic
    if symbolic:
        int_ceil = lambda x: T.ceil(x).astype("int32")
    else:
        int_ceil = lambda x: int(np.ceil(x))

    # eg. if input is 5 and output is 2, each pool size should be 3
    pool_size = int_ceil(in_shape / out_shape)
    # stride should equal pool_size, since we want non-overlapping regions
    stride = pool_size
    # pad as much as possible, since ignore_border=True
    padding = int_ceil((pool_size * out_shape - in_shape) / 2)

    if not symbolic:
        assert padding < pool_size

    return dict(
        ds=pool_size,
        st=stride,
        padding=padding,
    )
Developer: diogo149 | Project: treeano | Lines of code: 25 | Source: spp_net.py
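Worked numbers for the in_shape=5, out_shape=2 case mentioned in the comment above (a numpy sketch, independent of treeano, using float division as intended):

import numpy as np

in_shape, out_shape = 5.0, 2.0
pool_size = int(np.ceil(in_shape / out_shape))                   # ceil(5/2) = 3
stride = pool_size                                               # non-overlapping regions
padding = int(np.ceil((pool_size * out_shape - in_shape) / 2))   # ceil((6-5)/2) = 1
assert padding < pool_size
print(pool_size, stride, padding)                                # 3 3 1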


Example 7: encode

	def encode(self, state_below):
		"""
		:development:
			(1) may need to prepend encoding_length * padding array to the state_below to produce the same length sequence as state_below
			(2) can return an offset encoding by only returning certain indices of the encoding (though this is pretty wasteful)

		:type state_below: 2d tensor
		:param state_below: the entire sequence of states from the layer below the current one

		:type rval: 2d tensor
		:param rval: an encoding of the state_below (the entire sequence of state) to be passed to the above layer
		"""

		total_sequence_length = T.cast(state_below.shape[0], theano.config.floatX)
		self.n_encodings = T.cast(T.ceil(total_sequence_length / self.encoding_length), 'int32')
		self.n_padding_timesteps = T.cast(self.n_encodings * self.encoding_length - total_sequence_length, 'int32')
		zeros = T.alloc(np.cast[theano.config.floatX](0), self.n_padding_timesteps, self.n_vis)
		state_below = T.concatenate((zeros, state_below))

		Wxh = self.Wxh
		bxh = self.bxh
		Whhe = self.Whhe

		state_below = state_below.reshape((self.encoding_length, self.n_encodings, self.n_vis))
		state_below = T.dot(state_below, Wxh) + bxh
		
		# a single output will be n_encoding rows with n_hid features each
		encoding_0 = T.alloc(np.cast[theano.config.floatX](0), self.n_encodings, self.n_hid)

		encodings, updates = scan(fn=self.encode_step, sequences=[state_below], outputs_info=[encoding_0], non_sequences=[Whhe])
		# encodings is a 3d vector (encoding_length, n_encodings, n_hid)
		# returns encodings[-1] in 2d vector shape = (n_encodings, n_hid)
		return encodings[-1]
Developer: wulfebw | Project: rnn | Lines of code: 33 | Source: encoding_recurrent.py
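The ceil here rounds the sequence up to a whole number of encodings, and the difference is zero-padded at the front. Worked numbers (numpy sketch, assumed lengths):

import numpy as np

total, encoding_length = 10., 4.                          # assumed lengths
n_encodings = int(np.ceil(total / encoding_length))       # 3 chunks
n_padding = int(n_encodings * encoding_length - total)    # 2 zero rows prepended
print(n_encodings, n_padding)                             # 3 2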


Example 8: get_pseudo_likelihood_cost

	def get_pseudo_likelihood_cost(self, updates):
		"""Stochastic approximation to the pseudo-likelihood"""

		# index of bit i in expression p(x_i | x_{\i})
		bit_i_idx = theano.shared(value=0, name='bit_i_idx')

		# binarize the input image by rounding to nearest integer
		xi = T.round(self.input)

		# calculate free energy for the given bit configuration
		fe_xi = self.free_energy(xi, self.scaling)

		# flip bit x_i of matrix xi and preserve all other bits x_{\i}
		# Equivalent to xi[:,bit_i_idx] = 1-xi[:, bit_i_idx], but assigns
		# the result to xi_flip, instead of working in place on xi.
		xi_flip = T.set_subtensor(xi[:, bit_i_idx], 1 - T.ceil(xi[:, bit_i_idx] / (xi[:, bit_i_idx] + 1)))

		# calculate free energy with bit flipped
		fe_xi_flip = self.free_energy(xi_flip, self.scaling)

		# equivalent to e^(-FE(x_i)) / (e^(-FE(x_i)) + e^(-FE(x_{\i})))
		cost = T.mean(self.n_visible * T.log(T.nnet.sigmoid(fe_xi_flip - fe_xi)))

		# increment bit_i_idx % number as part of updates
		updates[bit_i_idx] = (bit_i_idx + 1) % self.n_visible

		return cost
Developer: MultiPath | Project: rsm | Lines of code: 27 | Source: fast_rsm.py
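The expression 1 - ceil(x / (x + 1)) used above is a branch-free bit flip for binary x: it maps 0 to 1 and 1 to 0. A quick numpy check (not from the project):

import numpy as np

x = np.array([0., 1., 1., 0.])
print(1 - np.ceil(x / (x + 1)))   # [1. 0. 0. 1.] == 1 - x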


Example 9: get_output_for

    def get_output_for(self, input, **kwargs):
        p = self.p
        k = self.k
        nbatches = input.shape[0]
        x_len = self.x_len
        # x_len = 30
        # x = input.reshape((nbatches, x_len))
        x = input.reshape((nbatches, x_len))

        p_floor = T.floor(p)
        p_ceil = T.ceil(p)
        
        # Deltas
        p_delta = p - p_floor
        ep_delta = T.exp(k*-p_delta)

        p2_delta = 1 - p_delta
        ep2_delta = T.exp(k*-p2_delta)

        p0_delta = 1 + p_delta
        ep0_delta = T.exp(k*-p0_delta)

        ep_sum = ep_delta + ep2_delta + ep0_delta

        perm1 = x[:, (T.cast(p_floor, 'int32'))%x_len]
        perm2 = x[:, (T.cast(p_ceil, 'int32')+1)%x_len]
        perm0 = x[:, (T.cast(p_floor, 'int32')-1)%x_len]

        perm1_factor = ep_delta * perm1
        perm2_factor = ep2_delta * perm2
        perm3_factor = ep0_delta * perm0
        res = (perm1_factor + perm2_factor + perm3_factor) / ep_sum
        return res.reshape(input.shape)
Developer: zenna | Project: ig | Lines of code: 33 | Source: permute.py


Example 10: get_output_for

    def get_output_for(self, inputs, **kwargs):
        # For each ROI R = [batch_index x1 y1 x2 y2]: max pool over R.
        # NOTE: this snippet is unfinished in the source project; the per-cell
        # pooling in the middle was left as commented-out scratch work.
        input = inputs[0]
        boxes = inputs[1]
        batch = T.shape(input)[0]
        channels = T.shape(input)[1]
        height = T.shape(input)[2]
        width = T.shape(input)[3]
        num_boxes = T.shape(boxes)[0]
        output = T.zeros((batch * num_boxes, channels, self.num_features))

        for idbb in range(num_boxes):
            bb = boxes[idbb]
            batch_ind = bb[0]

            pool_list = []
            # clip the scaled ROI corners to the feature-map bounds
            start_w = T.clip(T.floor(bb[1] * self.sp_scale), 0, width)
            start_h = T.clip(T.floor(bb[2] * self.sp_scale), 0, height)  # was: heigth
            end_w = T.clip(T.ceil(bb[3] * self.sp_scale), 0, width)
            end_h = T.clip(T.ceil(bb[4] * self.sp_scale), 0, height)

            w = T.maximum(end_w - start_w + 1, 1)
            h = T.maximum(end_h - start_h + 1, 1)  # was: T.amx, a typo for T.maximum

            # boundaries of a (pool_dims+1) x (pool_dims+1) sampling grid
            # (pool_dims is undefined in the published snippet)
            start_samples_y, start_sample_x = T.floor(
                _meshgrid(start_h, end_h, pool_dims + 1, start_w, end_w, pool_dims + 1))
            end_samples_y, end_sample_x = T.ceil(
                _meshgrid(start_h, end_h, pool_dims + 1, start_w, end_w, pool_dims + 1))

            # author's scratch code for max pooling each sampled cell:
            # for idx, px in enumerate(samples_x[:-1]):
            #     for idy, py in enumerate(samples_y[:-1]):
            #         pool_list.append(pool.dnn_pool(
            #             input[batch_ind, :,
            #                   np.floor(py):np.ceil(samples_y[idy + 1]),
            #                   np.floor(px):np.ceil(samples_x[idx + 1])],
            #             (0, 0), (None, None), 'max', (0, 0)).flatten(2))

            pool_list.append(pool)  # `pool` is never assigned in the published snippet
            # not efficient, but ok for the moment; if everything is correct this
            # vector is ordered as in Fast R-CNN
            output = T.set_subtensor(output[idbb], T.transpose(T.concatenate(pool_list, axis=1)))
        return output
Developer: marcopede | Project: lasagneRCNN | Lines of code: 47 | Source: SPP.py


Example 11: compileActivation

    def compileActivation(self, net, layerNum):
        variable = net.x if layerNum == 0 else net.varArrayA[layerNum - 1]

        #Calc shapes for reshape function on-the-fly. Assume we have square images as input.
        sX = T.cast(T.sqrt(T.shape(variable)[0] / self.kernel_shape[1]), 'int16')

        #Converts input from 2 to 4 dimensions
        Xr = T.reshape(variable.T, (T.shape(variable)[1], self.kernel_shape[1], sX, sX))

        if self.optimized:
            out_size = T.cast(
                T.ceil((T.shape(Xr)[-1] - T.shape(net.varWeights[layerNum]['w'])[-1] + 1) / np.float32(self.stride)),
                'int32')

            conv_op = FilterActs(stride=self.stride)
            input_shuffled = Xr.dimshuffle(1, 2, 3, 0)  # bc01 to c01b
            filters_shuffled = net.varWeights[layerNum]['w'].dimshuffle(1, 2, 3, 0)  # bc01 to c01b
            filters_flipped = filters_shuffled[:, ::-1, ::-1, :] # flip rows and columns
            contiguous_input = gpu_contiguous(input_shuffled)
            contiguous_filters = gpu_contiguous(filters_flipped *
                                                (net.dropOutVectors[layerNum].dimshuffle('x', 0, 1, 'x') if self.dropout else 1.0))
            a = conv_op(contiguous_input, contiguous_filters)
            a = a[:, :out_size, :out_size, :]
            #Add bias
            a = a + net.varWeights[layerNum]['b'].dimshuffle(0, 'x', 'x', 'x')
        else:
            a = T.nnet.conv2d(Xr, net.varWeights[layerNum]['w'] *
                              (net.dropOutVectors[layerNum].dimshuffle('x', 'x', 0, 1) if self.dropout else 1.0),
                              border_mode='valid',
                              subsample=(self.stride, self.stride))
            #Add bias
            a = a + net.varWeights[layerNum]['b'].dimshuffle('x', 0, 'x', 'x')

        if self.pooling:
            if self.optimized:
                #Pooling
                # ds - side of square pool window
                # stride - Defines the stride size between successive pooling squares.
                # Setting this parameter smaller than sizeX produces overlapping pools.
                # Setting it equal to sizeX gives the usual, non-overlapping pools. Values greater than sizeX are not allowed.
                pool_op = MaxPool(ds=self.pooling_shape, stride=self.pooling_shape)

                contiguous_input = gpu_contiguous(a)
                a = pool_op(contiguous_input)
                a = a.dimshuffle(3, 0, 1, 2)       # c01b to bc01
            else:
                #a = downsample.max_pool_2d(a, (self.pooling_shape, self.pooling_shape), ignore_border=False)
                a = pool.max_pool2D(a, (self.pooling_shape, self.pooling_shape), ignore_border=False)
        else:
            if self.optimized:
                a = a.dimshuffle(3, 0, 1, 2)       # c01b to bc01

        a = T.flatten(a, outdim=2).T

        #Sigmoid
        a = self.activation(a, self.pool_size)

        net.varArrayA.append(a)
Developer: SoftServeSAG | Project: DRL | Lines of code: 58 | Source: fTheanoNNclassCORE.py
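Worked numbers for the out_size formula above (numpy sketch, assumed sizes): a valid-mode convolution of a 28-pixel axis with a 5-pixel kernel yields 24 positions, and stride 2 keeps every other one:

import numpy as np

img, kernel, stride = 28, 5, 2    # assumed sizes
out_size = int(np.ceil((img - kernel + 1) / np.float32(stride)))
print(out_size)                   # ceil(24/2) = 12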


Example 12: _build_expression

    def _build_expression(self, input_expression=None):
        if self.pool_type not in ['max', 'avg']:
            raise NotImplementedError(
                'Pooling only implemented for max and avg')

        if input_expression is None:
            self.input_ = T.tensor4(dtype=self.input_dtype)
        else:
            self.input_ = input_expression

        # Replicating caffe style pooling means zero padding
        # then strided pooling with ignore_border=True
        if self.padding in [0, (0, 0)]:
            padded_input = self.input_
        else:
            zero_padder = ZeroPad(padding=self.padding)
            zero_padder._build_expression(self.input_)
            padded_input = zero_padder.expression_
        if self.pool_type == 'max':
            pooled = fancy_max_pool(padded_input,
                                    self.pool_shape, self.pool_stride,
                                    ignore_border=False)
        elif self.pool_type == 'avg':
            # self.pool_shape needs to be a tuple
            avg_kernel = T.cast(T.ones((1, 1) + self.pool_shape,
                                dtype=self.input_.dtype
                                ) / np.prod(self.pool_shape),
                                self.input_.dtype)
            n_imgs = self.input_.shape[0]
            n_channels = self.input_.shape[1]
            conv_output = T.nnet.conv2d(
                padded_input.reshape((n_imgs * n_channels, 1,
                                      padded_input.shape[2],
                                      padded_input.shape[3])),
                avg_kernel, subsample=self.pool_stride)
            pooled = conv_output.reshape((n_imgs, n_channels,
                                         conv_output.shape[2],
                                         conv_output.shape[3]))

        # A caffe quirk: The output shape is (for width, analogous for h:)
        # ceil((w + 2 * pad_w - kernel_w) / stride_w) + 1, instead of floor
        # With floor, ignore_border=True would have yielded the exact result
        # With ceil, sometimes we need an extra column and/or line. So we do
        # ignore_border=False and then crop to the right shape. Since the
        # shape is dynamic we need to first calculate it:

        # padding gotta be a tuple too
        pad = T.constant(self.padding)
        # pad = T.constant(zero_padder.padding_)
        # supposing here that self.pool_shape is a tuple. Should check
        pool_shape = T.constant(self.pool_shape)
        # stride hopefully a tuple, too
        pool_stride = T.constant(self.pool_stride, dtype='float64')
        float_shape = (self.input_.shape[2:4] + 2 * pad
                       - pool_shape) / pool_stride + 1
        output_shape = T.cast(T.ceil(float_shape), dtype='int64')
        self.expression_ = pooled[:, :, 0:output_shape[0],
                                        0:output_shape[1]]
Developer: bin2000 | Project: sklearn-theano | Lines of code: 58 | Source: base.py
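Worked numbers for the caffe-style output shape discussed in the comments above (numpy sketch, assumed sizes):

import numpy as np

w, pad, kernel, stride = 7, 1, 3, 2   # assumed sizes
out = int(np.ceil((w + 2 * pad - kernel) / float(stride))) + 1
print(out)                            # ceil(3) + 1 = 4 columns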


Example 13: gaussian_kernel_default_radius

def gaussian_kernel_default_radius(sigma, window_radius=None):
    if window_radius is None:
        radius = T.cast(T.max(T.ceil(3*sigma)), 'int32')
        if type(sigma) in (float, int):
            return int(radius.eval())
        else:
            return radius
    else:
        return window_radius
Developer: nebw | Project: beras | Lines of code: 9 | Source: filters.py
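A usage sketch (assuming the function above is importable): a concrete sigma is evaluated down to a plain int, while an explicit window_radius short-circuits the computation.

print(gaussian_kernel_default_radius(2.0))      # int(ceil(3 * 2.0)) = 6
print(gaussian_kernel_default_radius(2.0, 5))   # explicit radius wins: 5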


Example 14: get_hidden_values

 def get_hidden_values(self, input, batch_size):
     self.indices_high = T.ceil(self.indices).astype('int8')
     self.indices_low = T.floor(self.indices).astype('int8')
     self.factors_high = self.W[self.indices_high]
     self.factors_low = self.W[self.indices_low]
     self.factors = (self.factors_high - self.factors_low) * (self.indices - self.indices_low) / \
                    (self.indices_high - self.indices_low + 1E-5) + self.factors_low
     self.output = T.sum(self.x * T.transpose(self.factors).dimshuffle(0, 'x', 1), axis=2) / \
                   (self.length + 1.0).dimshuffle(0, 'x')
Developer: cedricdeboom | Project: RepresentationLearning | Lines of code: 9 | Source: NN_layers.py
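The ceil/floor pair above linearly interpolates rows of W at fractional indices. A numpy sketch of the same arithmetic (made-up values):

import numpy as np

W = np.array([[0., 0.], [10., 10.], [20., 20.]])
idx = np.array([0.5, 1.25])                       # fractional row indices
hi, lo = np.ceil(idx).astype(int), np.floor(idx).astype(int)
factors = (W[hi] - W[lo]) * (idx - lo)[:, None] / (hi - lo + 1e-5)[:, None] + W[lo]
print(factors)                                    # ~[[ 5.   5. ], [12.5 12.5]]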


Example 15: _ppf

    def _ppf(self, p):
        """
        The percentile point function (the inverse of the cumulative
        distribution function) of the discrete Weibull distribution.
        """
        q = self.q
        beta = self.beta

        return (tt.ceil(tt.power(tt.log(1 - p) / tt.log(q), 1. / beta)) - 1).astype('int64')
Developer: bballamudi | Project: pymc3 | Lines of code: 9 | Source: discrete.py
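A numpy sanity check of this ppf (the discrete Weibull CDF is F(k) = 1 - q**((k + 1)**beta), so the ppf should return the smallest k with F(k) >= p):

import numpy as np

q, beta, p = 0.9, 1.3, 0.5
x = int(np.ceil((np.log(1 - p) / np.log(q)) ** (1. / beta)) - 1)
cdf = lambda k: 1 - q ** ((k + 1) ** beta)
print(x, cdf(x) >= p, cdf(x - 1) < p)   # 4 True True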


Example 16: set_k_max

def set_k_max(layer, k_top, layer_position, nb_layers, sentence_length):
    """
    Set k_max based on the number of convolutional layers,
    and the layer position in the network.
    http://nal.co/papers/Kalchbrenner_DCNN_ACL14
    """
    alpha = (nb_layers - layer_position) * 1. / nb_layers
    layer.k_max = T.maximum(
        k_top,
        T.cast(T.ceil(sentence_length * alpha), 'int32')
    )
Developer: geshiming | Project: UltraDeep | Lines of code: 11 | Source: pooling.py
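Worked values for the formula above (numpy sketch, assuming k_top=4, three layers, and an 18-word sentence): k shrinks linearly with depth until it hits k_top.

import numpy as np

k_top, nb_layers, sentence_length = 4, 3, 18
for layer_position in (1, 2, 3):
    alpha = (nb_layers - layer_position) / float(nb_layers)
    print(layer_position, max(k_top, int(np.ceil(sentence_length * alpha))))
# -> 1 12, 2 6, 3 4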


Example 17: get_stencil

    def get_stencil(self, t, r=None, texp=None):
        if r is None or texp is None:
            return tt.shape_padright(t)

        z = tt.zeros_like(self.a)
        r = tt.as_tensor_variable(r)
        R = self.r_star + z
        hp = 0.5 * self.period

        if self.ecc is None:
            # Equation 14 from Winn (2010)
            k = r / self.r_star
            arg1 = tt.square(1 + k) - tt.square(self.b)
            arg2 = tt.square(1 - k) - tt.square(self.b)
            factor = R / (self.a * self.sin_incl)
            hdur1 = hp * tt.arcsin(factor * tt.sqrt(arg1)) / np.pi
            hdur2 = hp * tt.arcsin(factor * tt.sqrt(arg2)) / np.pi
            ts = [-hdur1, -hdur2, hdur2, hdur1]
            flag = z

        else:
            M_contact1 = self.contact_points_op(
                self.a, self.ecc, self.cos_omega, self.sin_omega,
                self.cos_incl + z, self.sin_incl + z, R + r)
            M_contact2 = self.contact_points_op(
                self.a, self.ecc, self.cos_omega, self.sin_omega,
                self.cos_incl + z, self.sin_incl + z, R - r)

            flag = M_contact1[2] + M_contact2[2]

            ts = [
                tt.mod((M_contact1[0]-self.M0)/self.n+hp, self.period)-hp,
                tt.mod((M_contact2[0]-self.M0)/self.n+hp, self.period)-hp,
                tt.mod((M_contact2[1]-self.M0)/self.n+hp, self.period)-hp,
                tt.mod((M_contact1[1]-self.M0)/self.n+hp, self.period)-hp
            ]

        start = self.period * tt.floor((tt.min(t) - self.t0) / self.period)
        end = self.period * (tt.ceil((tt.max(t) - self.t0) / self.period) + 1)
        start += self.t0
        end += self.t0
        tout = []
        for i in range(4):
            if z.ndim < 1:
                tout.append(ts[i] + tt.arange(start, end, self.period))
            else:
                tout.append(theano.scan(
                    fn=lambda t0, s0, e0, p0: t0 + tt.arange(s0, e0, p0),
                    sequences=[ts[i], start, end, self.period],
                )[0].flatten())

        ts = tt.sort(tt.concatenate(tout))
        return ts, flag
Developer: dfm | Project: exoplanet | Lines of code: 53 | Source: keplerian.py
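The floor/ceil pair near the end snaps the observation window to whole periods so the tt.arange of epochs covers all of t. The same arithmetic in numpy (made-up period and times):

import numpy as np

period, t0 = 10.0, 2.0                                       # assumed period and epoch
t = np.array([13.0, 47.0])                                   # observation times
start = period * np.floor((t.min() - t0) / period) + t0      # 12.0
end = period * (np.ceil((t.max() - t0) / period) + 1) + t0   # 62.0
print(np.arange(start, end, period))                         # [12. 22. 32. 42. 52.]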


Example 18: dynamic_k_max_pooling

def dynamic_k_max_pooling(input, sent_sizes, k_max_factor, k_max_final):
  """
    k_max_factor -- multiplied by sentence_sizes gives the value of kmax for each sentence
  """
  # Unroll input into (batch_size x nchannels x nwords) x ndim
  nbatches, nchannels, nwords, ndim = input.shape[0], input.shape[1], input.shape[2], input.shape[3]
  x = input.dimshuffle(0,1,3,2)

  sent_sizes = T.cast(T.ceil(sent_sizes * k_max_factor), dtype='int32')
  sent_sizes = T.maximum(sent_sizes, k_max_final)
  # sent_sizes_matrix = T.repeat(sent_sizes, nwords, axis=1)
  sent_sizes_matrix = T.repeat(sent_sizes.dimshuffle(0, 'x'), nwords, axis=1)

  idx = T.arange(nwords).dimshuffle('x', 0)
  idx_matrix = T.repeat(idx, nbatches, axis=0)

  sent_sizes_mask = T.lt(idx_matrix, sent_sizes_matrix)[:,::-1]

  neighborsArgSorted = T.argsort(x, axis=3)
  neighborsArgSorted_masked = ((neighborsArgSorted + 1) * sent_sizes_mask.dimshuffle(0,'x','x',1)) - 1
  neighborsArgSorted_masked_sorted = neighborsArgSorted_masked.sort(axis=3)

  nwords_max = T.cast(T.ceil(nwords * k_max_factor), 'int32')
  # print nwords_max.eval()
  neighborsArgSorted_masked_sorted_clipped = neighborsArgSorted_masked_sorted[:,:,:,-nwords_max:]

  ax0 = T.repeat(T.arange(nbatches), nchannels*ndim*nwords_max)
  ax1 = T.repeat(T.arange(nchannels), ndim * nwords_max).dimshuffle('x', 0)
  ax1 = T.repeat(ax1, nbatches, axis=0).flatten()
  ax2 = T.repeat(T.arange(ndim), nwords_max, axis=0).dimshuffle('x', 'x', 0)
  ax2 = T.repeat(ax2, nchannels, axis=1)
  ax2 = T.repeat(ax2, nbatches, axis=0).flatten()
  ax3 = neighborsArgSorted_masked_sorted_clipped.flatten()

  pooled_out = x[ax0, ax1, ax2, ax3]
  pooled_out = pooled_out.reshape((nbatches, nchannels, ndim, nwords_max)).dimshuffle(0,1,3,2)

  return pooled_out
Developer: BinbinBian | Project: deep-qa | Lines of code: 38 | Source: conv1d.py


Example 19: get_hidden_values

    def get_hidden_values(self, input):

        # convolve input feature maps with filters
        self.conv_out = conv.conv2d(
            input=input, filters=self.W, border_mode="full", filter_shape=self.kshp, image_shape=self.imshp
        )

        # k-max pooling.
        k = T.cast(T.max((self.k_Top, T.ceil(self.factor * self.s))), "int32")
        pool_shape = self.conv_out.shape
        pool = self.kmaxPool(self.conv_out, pool_shape, k)

        output = T.tanh(pool + self.b.dimshuffle("x", 0, "x", "x"))
        self.shape = output.shape

        return output
Developer: asgarJ | Project: Convolutional-AutoEnconders | Lines of code: 16 | Source: SCAE_Movie.py


Example 20: R2_RNN_block

def R2_RNN_block(tparams,inputs,prefix=None,name='r2_rnn',std=True):
	prefix=GetPrefix(prefix,name);
	n_steps=inputs.shape[0];
	n_samples=inputs.shape[1];
	x_size=inputs.shape[2];	

	r_steps=T.ceil(T.log2(n_steps)).astype('uint32');
	r_steps=T.arange(r_steps);
	# r_steps=r_steps.reshape([r_steps.shape[0],1]);

	
	def _step_inner(index,num,inps):
		index=index*2;
		index_=T.minimum(index+2,num);

		h=RNN_layer(tparams,inps[index:index_,:,:],prefix=prefix,name=None,std=False);
		return h[-1,:,:];
	
	def _step(r_step,num,inps,std=True):
		n=num;
		steps=T.arange((n+1)/2);
		# steps=steps.reshape([steps.shape[0],1]);

		out,updates=theano.scan(lambda index,num,inps:_step_inner(index,num,inps), 
							sequences=[steps], 
							outputs_info=None,
							non_sequences=[num,inps],
							name=_p(prefix,'inner_scan'),
							n_steps=steps.shape[0],
							profile=False);

		# if std:	out=standardize(out);
		num=out.shape[0];
		h=T.zeros_like(inps);
		h=T.set_subtensor(h[:num],out);
		return num,h;
		# return out;
	
	if std:	inputs=standardize(inputs);
	out,updates=theano.reduce(lambda r_step,num,inps:_step(r_step,num,inps), 
							sequences=r_steps, 
							outputs_info=[inputs.shape[0],inputs],
							# non_sequences=inputs,
							name=_p(prefix,'scan')
							);
	return out[1][:out[0]];
Developer: smajida | Project: action-recognition-based-on-focus-selection-and-multiChannel | Lines of code: 46 | Source: layer.py
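A pairwise reduction of n_steps items needs ceil(log2(n_steps)) rounds, which is exactly what r_steps computes above. Quick numpy check:

import numpy as np

n_steps = 10
print(int(np.ceil(np.log2(n_steps))))   # 4 rounds: 10 -> 5 -> 3 -> 2 -> 1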



Note: The theano.tensor.ceil examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers, and copyright remains with the original authors; consult each project's license before using or redistributing the code, and do not reproduce without permission.

