Python tensor.concatenate Function Code Examples


This article collects typical usage examples of the theano.tensor.concatenate function in Python. If you have been wondering what concatenate does, how to call it, or what real-world uses look like, the curated examples below should help.



Twenty code examples of the concatenate function are shown below, sorted by popularity by default.
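Before the examples, here is a minimal, self-contained sketch (our own illustration, not taken from any project below) of the basic call. T.concatenate(tensor_list, axis=...) joins a list of tensors along an existing axis, analogous to numpy.concatenate:

import numpy as np
import theano
import theano.tensor as T

a = T.matrix('a')  # symbolic matrix, shape (n, d1)
b = T.matrix('b')  # symbolic matrix, shape (n, d2)

# Join along the second axis: the result has shape (n, d1 + d2).
ab = T.concatenate([a, b], axis=1)
f = theano.function([a, b], ab)

x = np.ones((2, 3), dtype=theano.config.floatX)
y = np.zeros((2, 2), dtype=theano.config.floatX)
print(f(x, y).shape)  # (2, 5)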

Example 1: recurrence

    def recurrence( sample_z_t, sample_x_t, h_tm1_enc, h_tm1_dec, c_tm1_enc, c_tm1_dec,  mu_z_t,  mu_x_tm1, coeff_x_tm1,  v):
        v_hat = v - T.sum(( coeff_x_tm1.dimshuffle(0,'x',1) *  ( mu_x_tm1 + (T.exp(b_sig_x) * sample_x_t).reshape((batch_size, n_visible*n_gmm)) ).reshape((batch_size, n_visible, n_gmm)) ), axis = -1 ) #error input
        r_t = T.concatenate( [v , v_hat], axis = 1 ) 
        
        # v_enc = [r_t, h_tm1_dec]
        v_enc = T.concatenate( [r_t, h_tm1_dec] , axis = 1)
        
        #Generate h_t_enc = RNN_enc(h_tm1_enc, v_enc)
        i_t_enc = T.nnet.sigmoid(bi_enc + T.dot(c_tm1_enc, Wci_enc) + T.dot(h_tm1_enc, Whi_enc) + T.dot(v_enc, Wvi_enc))
        f_t_enc = T.nnet.sigmoid(bf_enc + T.dot(c_tm1_enc, Wcf_enc) + T.dot(h_tm1_enc, Whf_enc) + T.dot(v_enc, Wvf_enc))
        c_t_enc = (f_t_enc * c_tm1_enc) + ( i_t_enc * T.tanh( T.dot(v_enc, Wvc_enc) + T.dot( h_tm1_enc, Whc_enc) + bc_enc ))
        o_t_enc = T.nnet.sigmoid(bo_enc + T.dot(c_t_enc, Wco_enc) + T.dot(h_tm1_enc, Who_enc) + T.dot(v_enc, Wvo_enc))
        h_t_enc = o_t_enc * T.tanh( c_t_enc )
        
        # Get z_t
        mu_z_t = T.dot(h_t_enc, Wh_enc_mu_z ) + b_mu_z
        #sigma_z_t = T.dot(h_t_enc, Wh_enc_sig_z ) + b_sig_z
        #sample =  theano_rng.normal(size=mew_t.shape, avg = 0, std = 1, dtype=theano.config.floatX)
        z_t = mu_z_t + (T.exp(b_sig_z) * sample_z_t).reshape((batch_size,n_z)) 
        # Generate h_t_dec = RNN_dec(h_tm1_dec, z_t) 
        i_t_dec = T.nnet.sigmoid(bi_dec + T.dot(c_tm1_dec, Wci_dec) + T.dot(h_tm1_dec, Whi_dec) + T.dot(z_t, Wzi_dec))
        f_t_dec = T.nnet.sigmoid(bf_dec + T.dot(c_tm1_dec, Wcf_dec) + T.dot(h_tm1_dec, Whf_dec) + T.dot(z_t , Wzf_dec))
        c_t_dec = (f_t_dec * c_tm1_dec) + ( i_t_dec * T.tanh( T.dot(z_t, Wzc_dec) + T.dot( h_tm1_dec, Whc_dec) + bc_dec ))
        o_t_dec = T.nnet.sigmoid(bo_dec + T.dot(c_t_dec, Wco_dec) + T.dot(h_tm1_dec, Who_dec) + T.dot(z_t, Wzo_dec))
        h_t_dec = o_t_dec * T.tanh( c_t_dec )

        # Get w_t
        mu_x_t = mu_x_tm1 + T.dot(h_t_dec, Wh_dec_mu_x) + b_mu_x
        coeff_x_t = T.nnet.softmax( T.dot(h_t_dec, Wh_dec_coeff_x) + b_coeff_x)
        #sigma_x_t = sigma_x_tm1 + T.dot(h_t_dec, Wh_dec_sigma_x) + b_sig_x

        return [ h_t_enc, h_t_dec, c_t_enc, c_t_dec,  mu_z_t,  mu_x_t , coeff_x_t]
Developer: nehz | Project: NeuralNet | Lines: 32 | Source: new_draw_gmm.py


Example 2: output_probabilistic

    def output_probabilistic(self, m_w_previous, v_w_previous):
        if (self.non_linear):
            m_in = self.m_w - m_w_previous
            v_in = self.v_w
            # We compute the mean and variance after the ReLU activation
            lam = self.lam
            v_1 = 1 + 2*lam*v_in
            v_1_inv = v_1**-1

            s_1 = T.prod(v_1,axis=1)**-0.5
            v_2 = 1 + 4*lam*v_in
            v_2_inv = v_2**-1
            s_2 = T.prod(v_2,axis=1)**-0.5
            v_inv = v_in**-1
            exponent1 = m_in**2*(1 - v_1_inv)*v_inv
            exponent1 = T.sum(exponent1,axis=1)
            exponent2 = m_in**2*(1 - v_2_inv)*v_inv
            exponent2 = T.sum(exponent2,axis=1)
            m_a = s_1*T.exp(-0.5*exponent1)
            v_a = s_2*T.exp(-0.5*exponent2) - m_a**2

            return (m_a, v_a)

        else:
            m_w_previous_with_bias = \
            T.concatenate([ m_w_previous, T.alloc(1, 1) ], 0)
            v_w_previous_with_bias = \
            T.concatenate([ v_w_previous, T.alloc(0, 1) ], 0)

            m_linear = T.dot(self.m_w, m_w_previous_with_bias) / T.sqrt(self.n_inputs)
            v_linear = (T.dot(self.v_w, v_w_previous_with_bias) + \
                T.dot(self.m_w**2, v_w_previous_with_bias) + \
                T.dot(self.v_w, m_w_previous_with_bias**2)) / self.n_inputs
            return (m_linear, v_linear)
Developer: jshe857 | Project: thesis-rbfnn | Lines: 34 | Source: network_layer.py


Example 3: get_uhs_operator

def get_uhs_operator(uhs, depth, n_hidden, rhos):
    """

    :param uhs:
    :param depth:
    :param n_hidden:
    :param rhos: can be shared variable or constant of shape (depth, )!!
    :return:
    """
    # Will use a Fourier matrix (will be O(n^2)...)
    # Doesn't seem to slow things down much though!
    exp_phases = [T.cos(uhs), T.sin(uhs)]
    neg_exp_phases = [T.cos(uhs[:, ::-1]), -T.sin(uhs[:, ::-1])]
    ones_ = [T.ones((depth, 1), dtype=theano.config.floatX), T.zeros((depth, 1), dtype=theano.config.floatX)]

    rhos_reshaped = T.reshape(rhos, (depth, 1), ndim=2)
    rhos_reshaped = T.addbroadcast(rhos_reshaped, 1)

    eigvals_re = rhos_reshaped * T.concatenate((ones_[0], exp_phases[0], -ones_[0], neg_exp_phases[0]), axis=1)
    eigvals_im = rhos_reshaped * T.concatenate((ones_[1], exp_phases[1], -ones_[1], neg_exp_phases[1]), axis=1)
    phase_array = -2 * np.pi * np.outer(np.arange(n_hidden), np.arange(n_hidden)) / n_hidden
    f_array_re_val = np.cos(phase_array) / n_hidden
    f_array_im_val = np.sin(phase_array) / n_hidden
    f_array_re = theano.shared(f_array_re_val.astype(theano.config.floatX), name="f_arr_re")
    f_array_im = theano.shared(f_array_im_val.astype(theano.config.floatX), name="f_arr_im")

    a_k = T.dot(eigvals_re, f_array_re) + T.dot(eigvals_im, f_array_im)
    uhs_op = rep_vec(a_k, n_hidden, n_hidden)  # shape (depth, 2 * n_hidden - 1)

    return uhs_op
Developer: harpone | Project: DerpRNN | Lines: 30 | Source: utils.py


Example 4: get_bivariate_normal_spec

def get_bivariate_normal_spec():
    X1,X2,mu,sigma = [T.scalar('X1'),T.scalar('X2'), T.vector('mu'), T.matrix('sigma')]
    GaussianDensitySpec = FunctionSpec(variables=[X1, X2, mu, sigma],
                                       output_expression = -0.5*T.dot(T.dot((T.concatenate([X1.dimshuffle('x'),X2.dimshuffle('x')])-mu).T,
                                                                            nlinalg.matrix_inverse(sigma)),
                                                                      (T.concatenate([X1.dimshuffle('x'),X2.dimshuffle('x')])-mu)))
    return GaussianDensitySpec
Developer: grahamsdoman | Project: pysterior | Lines: 7 | Source: energy.py


Example 5: _create_maximum_activation_update

def _create_maximum_activation_update(output, record, streamindex, topn):
    """
    Calculates update of the topn maximums for one batch of outputs.
    """
    dims, maximums, indices, snapshot = record
    counters = tensor.tile(tensor.shape_padright(
        tensor.arange(output.shape[0]) + streamindex), (1, output.shape[1]))
    if len(dims) == 1:
        # output is a 2d tensor, (cases, units) -> activation
        tmax = output
        # counters is a 2d tensor broadcastable (cases, units) -> case_index
        tind = counters
    else:
        # output is a 4d tensor: fmax flattens it to 3d
        fmax = output.flatten(ndim=3)
        # fargmax is a 2d tensor containing rolled maximum locations
        fargmax = fmax.argmax(axis=2)
        # fetch the maximum. tmax is 2d, (cases, units) -> activation
        tmax = _apply_index(fmax, fargmax, axis=2)
        # targmax is a tuple that separates rolled-up location into (x, y)
        targmax = divmod(fargmax, dims[2])
        # tind is a 3d tensor (cases, units, 3) -> case_index, maxloc
        # this will match indices which is a 3d tensor also
        tind = tensor.stack((counters, ) + targmax, axis=2)
    cmax = tensor.concatenate((maximums, tmax), axis=0)
    cind = tensor.concatenate((indices, tind), axis=0)
    cargsort = (-cmax).argsort(axis=0)[:topn]
    newmax = _apply_perm(cmax, cargsort, axis=0)
    newind = _apply_perm(cind, cargsort, axis=0)
    updates = [(maximums, newmax), (indices, newind)]
    if snapshot:
        csnap = tensor.concatenate((snapshot, output), axis=0)
        newsnap = _apply_perm(csnap, cargsort, axis=0)
        updates.append((snapshot, newsnap))
    return updates
Developer: davidbau | Project: net-intent | Lines: 35 | Source: maxact.py
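The heart of the update above is the merge: concatenate the running top-n activations with the new batch, argsort, and keep the first n rows per unit. A minimal numpy sketch of just that merge (hypothetical values, ignoring the index bookkeeping that _apply_perm also carries along):

import numpy as np

topn = 3
maximums = np.array([[9.0, 8.0], [7.0, 6.0], [5.0, 4.0]])  # (topn, units)
batch    = np.array([[8.5, 1.0], [6.5, 9.5]])              # (cases, units)

cmax = np.concatenate((maximums, batch), axis=0)   # stack old maxima and new batch
order = (-cmax).argsort(axis=0)[:topn]             # row indices of the top-n per column
newmax = np.take_along_axis(cmax, order, axis=0)   # gather the new top-n
print(newmax)  # [[9.  9.5] [8.5 8. ] [7.  6. ]]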


Example 6: forward

    def forward(self, x, hc):
        """
        :param x: the input vector or matrix
        :param hc: the vector/matrix of [ c_tm1, h_tm1 ], i.e. the cell state and hidden state concatenated together
        :return: [ c_t, h_t ] as a single concatenated vector/matrix
        """
        n_in, n_out, activation = self.n_in, self.n_out, self.activation

        if hc.ndim > 1:
            c_tm1 = hc[:, :n_out]
            h_tm1 = hc[:, n_out:]
        else:
            c_tm1 = hc[:n_out]
            h_tm1 = hc[n_out:]

        in_t = self.in_gate.forward(x, h_tm1)
        forget_t = self.forget_gate.forward(x, h_tm1)
        out_t = self.out_gate.forward(x, h_tm1)

        c_t = forget_t * c_tm1 + in_t * self.input_layer.forward(x, h_tm1)
        h_t = out_t * T.tanh(c_t)

        if hc.ndim > 1:
            return T.concatenate([c_t, h_t], axis=1)
        else:
            return T.concatenate([c_t, h_t])
Developer: hiroki13 | Project: neural-sentence-matching-system | Lines: 26 | Source: basic.py


Example 7: recurrence

        def recurrence( sample_z_t, sample_x_t, h_tm1_enc, h_tm1_dec, c_tm1_enc, c_tm1_dec,  mu_z_t,  sigma_z_t, mu_x_tm1, sigma_x_tm1,  v):
            if v is not None:
                v_hat = v -  ( mu_x_tm1 + (sigma_x_tm1 * sample_x_t.reshape((batch_size, n_visible)) ) )#error input
                r_t = T.concatenate( [v , v_hat], axis = 1 ) 
            else:
                v_hat = mu_x_tm1 -  ( mu_x_tm1 + (sigma_x_tm1 * sample_x_t.reshape((batch_size, n_visible)) ) )#error input
                r_t = T.concatenate( [mu_x_tm1 , v_hat], axis = 1 ) 
            # v_enc = [r_t, h_tm1_dec]
            v_enc = T.concatenate( [r_t, h_tm1_dec] , axis = 1)
        
            #Generate h_t_enc = RNN_enc(h_tm1_enc, v_enc)
            i_t_enc = T.nnet.sigmoid(bi_enc + T.dot(c_tm1_enc, Wci_enc) + T.dot(h_tm1_enc, Whi_enc) + T.dot(v_enc, Wvi_enc))
            f_t_enc = T.nnet.sigmoid(bf_enc + T.dot(c_tm1_enc, Wcf_enc) + T.dot(h_tm1_enc, Whf_enc) + T.dot(v_enc, Wvf_enc))
            c_t_enc = (f_t_enc * c_tm1_enc) + ( i_t_enc * T.tanh( T.dot(v_enc, Wvc_enc) + T.dot( h_tm1_enc, Whc_enc) + bc_enc ))
            o_t_enc = T.nnet.sigmoid(bo_enc + T.dot(c_t_enc, Wco_enc) + T.dot(h_tm1_enc, Who_enc) + T.dot(v_enc, Wvo_enc))
            h_t_enc = o_t_enc * T.tanh( c_t_enc )
        
            # Get z_t
            mu_z_t = T.dot(h_t_enc, Wh_enc_mu_z ) + b_mu_z
            sigma_z_t = sigma_b + T.nnet.softplus(T.dot(h_t_enc, Wh_enc_sig_z ) + b_sig_z)
            #sample =  theano_rng.normal(size=mew_t.shape, avg = 0, std = 1, dtype=theano.config.floatX)
            z_t = mu_z_t + (sigma_z_t * (sample_z_t.reshape((batch_size,n_z))) ) 
            # Generate h_t_dec = RNN_dec(h_tm1_dec, z_t) 
            i_t_dec = T.nnet.sigmoid(bi_dec + T.dot(c_tm1_dec, Wci_dec) + T.dot(h_tm1_dec, Whi_dec) + T.dot(z_t, Wzi_dec))
            f_t_dec = T.nnet.sigmoid(bf_dec + T.dot(c_tm1_dec, Wcf_dec) + T.dot(h_tm1_dec, Whf_dec) + T.dot(z_t , Wzf_dec))
            c_t_dec = (f_t_dec * c_tm1_dec) + ( i_t_dec * T.tanh( T.dot(z_t, Wzc_dec) + T.dot( h_tm1_dec, Whc_dec) + bc_dec ))
            o_t_dec = T.nnet.sigmoid(bo_dec + T.dot(c_t_dec, Wco_dec) + T.dot(h_tm1_dec, Who_dec) + T.dot(z_t, Wzo_dec))
            h_t_dec = o_t_dec * T.tanh( c_t_dec )

            # Get w_t
            mu_x_t = mu_x_tm1 + T.dot(h_t_dec, Wh_dec_mu_x) + b_mu_x
            sigma_x_t = sigma_b +  T.nnet.softplus(T.dot(h_t_dec, Wh_dec_sig_x) + b_sig_x)

            return [ h_t_enc, h_t_dec, c_t_enc, c_t_dec,  mu_z_t, sigma_z_t,  mu_x_t, sigma_x_t]
Developer: nehz | Project: NeuralNet | Lines: 34 | Source: rnn-draw.py


Example 8: __init__

    def __init__(self, input_ngram, input_sm, vocab_size, emb_dim, num_section, linear_W_emb=None, fix_emb=False, nonlinear=None, activation=None):
        
        global rng
        global init_range
        if linear_W_emb is None:
            # random initialize
            linear_W_emb = np.asarray(rng.uniform(
                low=-init_range, high=init_range, size=(vocab_size, emb_dim)), dtype=theano.config.floatX)
        else:
            # use the given model parameter
            given_vocab_size, given_emb_dim = linear_W_emb.shape
            assert(given_vocab_size == vocab_size and given_emb_dim == emb_dim)

        # shared variables
        self.W_emb = theano.shared(value=linear_W_emb, name='W_emb')

        # stack vectors
        input_ngram = T.cast(input_ngram, 'int32')
        input_sm = T.cast(input_sm, 'int32')

        # output is a matrix where each row corresponds to a context_size embedding vector, and the number of rows equals the batch size
        # output dimensions: batch_size * ((context_size + 1) * emb_dim)
        output_local = self.W_emb[input_ngram[:, :-1].flatten()].reshape(
            (input_ngram.shape[0], emb_dim * (input_ngram.shape[1] - 1)))  # self.W_emb.shape[1]
        
        sentence_lengths = input_sm[:,0]
        sentence_matrix = input_sm[:,1:]

        sentence_num = sentence_matrix.shape[0]
        global_length = sentence_matrix.shape[1]
        section_length = T.cast(T.ceil(global_length / float(num_section)), 'int32')

        # For the first section
        sentence_embeddings = T.mean(self.W_emb[sentence_matrix[:, :section_length].flatten()].reshape(
            (sentence_num, section_length, emb_dim)), axis=1)

        # For the remaining sections
        for i in xrange(1, num_section):
            current_section = T.mean(self.W_emb[sentence_matrix[:, i*section_length:(i+1)*section_length].flatten()].reshape(
                (sentence_num, section_length, emb_dim)), axis=1)
            sentence_embeddings = T.concatenate([sentence_embeddings, current_section], axis=1)

        # get the sentence index for each ngram vector, and transform it to 0-based
        sentence_indeces = input_ngram[:,-1]
        base_index = sentence_indeces[0]
        sentence_indeces = sentence_indeces - base_index

        # the last column of output should be a weighted sum of the sentence
        # vectors
        output_global = sentence_embeddings[sentence_indeces.flatten()].reshape((sentence_indeces.shape[0], emb_dim * num_section))

        # handle non-linear layer
        if nonlinear is None or activation is None:
            self.output = T.concatenate([output_local, output_global], axis=1)
            # params is the word embedding matrix
            self.params = [self.W_emb] if not fix_emb else []
        else:
            self.non_linear_params, non_linear_output_global = addNonlinearLayer(output_global, emb_dim * num_section, nonlinear, activation)
            self.output = T.concatenate([output_local, non_linear_output_global], axis=1)
            self.params = [self.W_emb] + self.non_linear_params if not fix_emb else self.non_linear_params
Developer: lixiangnlp | Project: nnjm-global | Lines: 60 | Source: model_util.py


Example 9: recurrence

    def recurrence( sample_t, h_tm1_enc, h_tm1_dec, c_tm1_enc, c_tm1_dec, w_tm1, mew_t, sigma_t, v):
        v_hat = v - T.nnet.sigmoid(w_tm1) #error input
        r_t = T.concatenate( [v , v_hat], axis = 1 ) 
        
        # v_enc = [r_t, h_tm1_dec]
        v_enc = T.concatenate( [r_t, h_tm1_dec] , axis = 1)
        
        #Generate h_t_enc = RNN_enc(h_tm1_enc, v_enc)
        i_t_enc = T.nnet.sigmoid(bi_enc + T.dot(c_tm1_enc, Wci_enc) + T.dot(h_tm1_enc, Whi_enc) + T.dot(v_enc, Wvi_enc))
        f_t_enc = T.nnet.sigmoid(bf_enc + T.dot(c_tm1_enc, Wcf_enc) + T.dot(h_tm1_enc, Whf_enc) + T.dot(v_enc, Wvf_enc))
        c_t_enc = (f_t_enc * c_tm1_enc) + ( i_t_enc * T.tanh( T.dot(v_enc, Wvc_enc) + T.dot( h_tm1_enc, Whc_enc) + bc_enc ))
        o_t_enc = T.nnet.sigmoid(bo_enc + T.dot(c_t_enc, Wco_enc) + T.dot(h_tm1_enc, Who_enc) + T.dot(v_enc, Wvo_enc))
        h_t_enc = o_t_enc * T.tanh( c_t_enc )
        
        # Get z_t
        mew_t = T.dot(h_t_enc, Wh_enc_mew )
        sigma_t = T.dot(h_t_enc, Wh_enc_sig )
        #sample =  theano_rng.normal(size=mew_t.shape, avg = 0, std = 1, dtype=theano.config.floatX)
        z_t = mew_t + (T.exp(sigma_t) * sample_t )
        # Generate h_t_dec = RNN_dec(h_tm1_dec, z_t) 
        i_t_dec = T.nnet.sigmoid(bi_dec + T.dot(c_tm1_dec, Wci_dec) + T.dot(h_tm1_dec, Whi_dec) + T.dot(z_t, Wzi_dec))
        f_t_dec = T.nnet.sigmoid(bf_dec + T.dot(c_tm1_dec, Wcf_dec) + T.dot(h_tm1_dec, Whf_dec) + T.dot(z_t , Wzf_dec))
        c_t_dec = (f_t_dec * c_tm1_dec) + ( i_t_dec * T.tanh( T.dot(z_t, Wzc_dec) + T.dot( h_tm1_dec, Whc_dec) + bc_dec ))
        o_t_dec = T.nnet.sigmoid(bo_dec + T.dot(c_t_dec, Wco_dec) + T.dot(h_tm1_dec, Who_dec) + T.dot(z_t, Wzo_dec))
        h_t_dec = o_t_dec * T.tanh( c_t_dec )

        # Get w_t
        w_t = w_tm1 + T.dot(h_t_dec, Wh_dec_w)
        return [ h_t_enc, h_t_dec, c_t_enc, c_t_dec, w_t, mew_t, sigma_t]
Developer: nehz | Project: NeuralNet | Lines: 29 | Source: new_draw.py


Example 10: _join_global_RVs

def _join_global_RVs(global_RVs, global_order):
    if len(global_RVs) == 0:
        inarray_global = None
        uw_global = None
        replace_global = {}
        c_g = 0
    else:
        joined_global = tt.concatenate([v.ravel() for v in global_RVs])
        uw_global = tt.vector('uw_global')
        uw_global.tag.test_value = np.concatenate(
            [joined_global.tag.test_value, joined_global.tag.test_value]
        )

        inarray_global = joined_global.type('inarray_global')
        inarray_global.tag.test_value = joined_global.tag.test_value

        # Replace RVs with reshaped subvectors of the joined vector
        # The order of global_order is the same as that of global_RVs
        subvecs = [reshape_t(inarray_global[slc], shp).astype(dtyp)
                   for _, slc, shp, dtyp in global_order.vmap]
        replace_global = {v: subvec for v, subvec in zip(global_RVs, subvecs)}

        # Weight vector
        cs = [c for _, c in global_RVs.items()]
        oness = [tt.ones(v.ravel().tag.test_value.shape) for v in global_RVs]
        c_g = tt.concatenate([c * ones for c, ones in zip(cs, oness)])

    return inarray_global, uw_global, replace_global, c_g
Developer: bballamudi | Project: pymc3 | Lines: 28 | Source: advi_minibatch.py
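The ravel-and-concatenate pattern above — flattening every random variable into one vector so a single symbolic array can stand in for all of them — is worth seeing in isolation. A small sketch of the same idea (hypothetical variables, not pymc3 internals):

import numpy as np
import theano
import theano.tensor as T

v1 = T.matrix('v1')
v2 = T.vector('v2')
joined = T.concatenate([v.ravel() for v in (v1, v2)])  # one flat vector
f = theano.function([v1, v2], joined)

out = f(np.ones((2, 2), dtype=theano.config.floatX),
        np.zeros(3, dtype=theano.config.floatX))
print(out.shape)  # (7,)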


Example 11: _join_local_RVs

def _join_local_RVs(local_RVs, local_order):
    if len(local_RVs) == 0:
        inarray_local = None
        uw_local = None
        replace_local = {}
        c_l = 0
    else:
        joined_local = tt.concatenate([v.ravel() for v in local_RVs])
        uw_local = tt.vector('uw_local')
        uw_local.tag.test_value = np.concatenate([joined_local.tag.test_value,
                                                  joined_local.tag.test_value])

        inarray_local = joined_local.type('inarray_local')
        inarray_local.tag.test_value = joined_local.tag.test_value

        get_var = {var.name: var for var in local_RVs}
        replace_local = {
            get_var[var]: reshape_t(inarray_local[slc], shp).astype(dtyp)
            for var, slc, shp, dtyp in local_order.vmap
        }

        # Weight vector
        cs = [c for _, (_, c) in local_RVs.items()]
        oness = [tt.ones(v.ravel().tag.test_value.shape) for v in local_RVs]
        c_l = tt.concatenate([c * ones for c, ones in zip(cs, oness)])

    return inarray_local, uw_local, replace_local, c_l
Developer: bballamudi | Project: pymc3 | Lines: 27 | Source: advi_minibatch.py


Example 12: diag_gauss

def diag_gauss(inpt):
    """Transfer function to turn an arary into sufficient statistics of a
    diagonal Gaussian.

    The first half of the input will be left unchanged, the second will be
    squared. The "split" into halves is performed along the second axis.

    Parameters
    ----------

    inpt : Theano tensor
        Array of shape ``(n, d)`` or ``(t, n, d)``.

    Returns
    -------

    output : Theano variable.
        Transformed input. Same shape as ``inpt``.
    """
    half = inpt.shape[-1] // 2
    if inpt.ndim == 3:
        mean, var = inpt[:, :, :half], inpt[:, :, half:]
        res = T.concatenate([mean, var ** 2 + 1e-8], axis=2)
    else:
        mean, var = inpt[:, :half], inpt[:, half:]
        res = T.concatenate([mean, var ** 2 + 1e-8], axis=1)
    return res
Developer: vinodrajendran001 | Project: breze | Lines: 27 | Source: transfer.py
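To make the docstring concrete: the last axis is split in half, the means pass through, and the second half is squared (plus 1e-8) into strictly positive variances. A hypothetical call, assuming diag_gauss above is in scope:

import numpy as np
import theano
import theano.tensor as T

inpt = T.matrix('inpt')            # shape (n, 2 * d)
f = theano.function([inpt], diag_gauss(inpt))

arr = np.array([[1.0, -2.0, 0.5, 3.0]], dtype=theano.config.floatX)
print(f(arr))  # approximately [[ 1.   -2.    0.25  9.  ]]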


Example 13: get_unfolding_cost

    def get_unfolding_cost(self):
        ''' computes the unfolding reconstruction cost (more than 2 inputs) '''
        x  = T.reshape(self.x, (-1, self.n_vector))
        yi = x[0]
        for i in range(1, self.num):
            xi = T.concatenate((yi, x[i]))
            yi = self.get_hidden_values(xi)
        # Save the deepest hidden value as the output vector
        self.vector = copy.deepcopy(yi)

        tmp = []
        for i in range(1, self.num):
            zi = self.get_reconstructed(yi)
            t  = T.reshape(zi, (2, self.n_vector))
            tmp.append(t[1])
            yi = t[0]
        tmp.append(yi)
        tmp.reverse()

        x = self.x
        z = T.concatenate(tmp)

        # cross-entropy cost should be modified here.
        L = -T.sum( (0.5*x+0.5)*T.log(0.5*z+0.5) + (-0.5*x+0.5)*T.log(-0.5*z+0.5) )
        # squared cost.
        #L = -T.sum( (x-z)**2 )

        cost = T.mean(L) + 0.01*(self.W**2).sum()   # cost for a minibatch
        return cost
Developer: MultiPath | Project: Dep-Compo | Lines: 34 | Source: RAE.py


Example 14: _build

        def _build(det_dropout):
            all_out_probs = []
            for encoding, lstmstack, encoded_melody, relative_pos in zip(self.encodings, self.lstmstacks, encoded_melodies, relative_posns):
                activations = lstmstack.do_preprocess_scan( timestep=T.tile(T.arange(n_time), (n_batch,1)) ,
                                                            relative_position=relative_pos,
                                                            cur_chord_type=chord_types,
                                                            cur_chord_root=chord_roots,
                                                            last_output=T.concatenate([T.tile(encoding.initial_encoded_form(), (n_batch,1,1)),
                                                                                encoded_melody[:,:-1,:] ], 1),
                                                            deterministic_dropout=det_dropout)

                out_probs = encoding.decode_to_probs(activations, relative_pos, self.bounds.lowbound, self.bounds.highbound)
                all_out_probs.append(out_probs)
            reduced_out_probs = functools.reduce((lambda x,y: x*y), all_out_probs)
            if self.normalize_artic_only:
                non_artic_probs = reduced_out_probs[:,:,:2]
                artic_probs = reduced_out_probs[:,:,2:]
                non_artic_sum = T.sum(non_artic_probs, 2, keepdims=True)
                artic_sum = T.sum(artic_probs, 2, keepdims=True)
                norm_artic_probs = artic_probs*(1-non_artic_sum)/artic_sum
                norm_out_probs = T.concatenate([non_artic_probs, norm_artic_probs], 2)
            else:
                normsum = T.sum(reduced_out_probs, 2, keepdims=True)
                normsum = T.maximum(normsum, constants.EPSILON)
                norm_out_probs = reduced_out_probs/normsum
            return Encoding.compute_loss(norm_out_probs, correct_notes, True)
Developer: Impro-Visor | Project: lstmprovisor-python | Lines: 26 | Source: product_model.py


Example 15: filter_and_prob

def filter_and_prob(inpt, transition, emission,
           visible_noise_mean, visible_noise_cov,
           hidden_noise_mean, hidden_noise_cov,
           initial_hidden, initial_hidden_cov):
    step = forward_step(
        transition, emission,
        visible_noise_mean, visible_noise_cov,
        hidden_noise_mean, hidden_noise_cov)

    hidden_mean_0 = T.zeros_like(hidden_noise_mean).dimshuffle('x', 0)
    hidden_cov_0 = T.zeros_like(hidden_noise_cov).dimshuffle('x', 0, 1)
    f0, F0, ll0 = step(inpt[0], hidden_mean_0, hidden_cov_0)
    replace = {hidden_noise_mean: initial_hidden, 
               hidden_noise_cov: initial_hidden_cov}
    f0 = theano.clone(f0, replace)
    F0 = theano.clone(F0, replace)
    ll0 = theano.clone(ll0, replace)

    (f, F, ll), _ = theano.scan(
        step,
        sequences=inpt[1:],
        outputs_info=[f0, F0, None])

    ll = ll.sum(axis=0)

    f = T.concatenate([T.shape_padleft(f0), f])
    F = T.concatenate([T.shape_padleft(F0), F])
    ll += ll0

    return f, F, ll
Developer: ddofer | Project: breze | Lines: 30 | Source: lds.py


Example 16: apply

 def apply(self, source_sentence, source_sentence_mask):
     """Creates the final list of annotations.
     
     Args:
         source_sentence (Variable): Source sentence with words in
                                     vector representation.
         source_sentence_mask (Variable): Source mask
     
     Returns:
         Variable. source annotations
     """
     # Time as first dimension
     base_representations,base_mask = self.base_encoder.apply(
                                                       source_sentence,
                                                       source_sentence_mask)
     annotations = []
     masks = []
     if self.add_direct:
         annotations.append(base_representations)
         masks.append(base_mask)
     for annotator in self.annotators:
         ann,mask = annotator.apply(base_representations,
                                    base_mask)
         annotations.append(ann)
         masks.append(mask)
     return tensor.concatenate(annotations), tensor.concatenate(masks)
Developer: ucam-smt | Project: sgnmt | Lines: 26 | Source: encoder.py


Example 17: getScores

    def getScores(self, args1, args2, l, n, relationProbs, neg1, neg2, entropy):
        weightedC1= T.dot(relationProbs, self.C1.dimshuffle(1, 0))
        weightedC2= T.dot(relationProbs, self.C2.dimshuffle(1, 0))

        left1 = self.leftMostFactorization(batchSize=l, args=args1, wC1=weightedC1)
        right1 = self.rightMostFactorization(batchSize=l, args=args2, wC2=weightedC2)
        one = left1 + right1

        u = T.concatenate([one + self.Ab[args1], one + self.Ab[args2]])
        logScoresP = T.log(T.nnet.sigmoid(u))
        allScores = logScoresP
        allScores = T.concatenate([allScores, entropy, entropy])

        negembed1 = self.A[neg1.flatten()].reshape((n, l, self.k))
        negembed2 = self.A[neg2.flatten()].reshape((n, l, self.k))
        negative1 = self.negLeftMostFactorization(batchSize=l,
                                                  negEmbed=negembed1,
                                                  wC1=weightedC1)
        negative2 = self.negRightMostFactorization(batchSize=l,
                                                  negEmbed=negembed2,
                                                  wC2=weightedC2)

        negOne = negative1.dimshuffle(1, 0) + right1
        negTwo = negative2.dimshuffle(1, 0) + left1
        g = T.concatenate([negOne + self.Ab[neg1], negTwo + self.Ab[neg2]])
        logScores = T.log(T.nnet.sigmoid(-g))
        allScores = T.concatenate([allScores, logScores.flatten()])

        return allScores
Developer: Simon-X | Project: relation-autoencoder | Lines: 29 | Source: SelectionalPreferences.py


Example 18: _setOutputs

	def _setOutputs(self) :
		outs = []
		for l in self.network.inConnections[self] :
			outs.append(l.outputs)
	
		self.outputs = tt.concatenate( outs, axis = 1 )
		self.testOutputs = tt.concatenate( outs, axis = 1 )
Developer: saikswaroop | Project: Mariana | Lines: 7 | Source: layers.py


Example 19: _pad_blanks

 def _pad_blanks(queryseq, blank_symbol, queryseq_mask=None):
     """
     Pad queryseq and corresponding queryseq_mask with blank symbol
     :param queryseq  (L, B)
     :param queryseq_mask (L, B)
     :param blank_symbol  scalar
     :return queryseq_padded, queryseq_mask_padded, both with shape (2L+1, B)
     """
     # for queryseq
     queryseq_extended = queryseq.dimshuffle(1, 0, 'x')                              # (L, B) -> (B, L, 1)
     blanks = tensor.zeros_like(queryseq_extended) + blank_symbol                    # (B, L, 1)
     concat = tensor.concatenate([queryseq_extended, blanks], axis=2)                # concat.shape = (B, L, 2)
     res = concat.reshape((concat.shape[0], concat.shape[1] * concat.shape[2])).T    # res.shape = (2L, B); the reshape interlaces the last two dimensions
     begining_blanks = tensor.zeros((1, res.shape[1])) + blank_symbol                # (1, B)
     queryseq_padded = tensor.concatenate([begining_blanks, res], axis=0)            # (1+2L, B)
     # for queryseq_mask
     if queryseq_mask is not None:
         queryseq_mask_extended = queryseq_mask.dimshuffle(1, 0, 'x')                          # (L, B) -> (B, L, 1)
         concat = tensor.concatenate([queryseq_mask_extended, queryseq_mask_extended], axis=2) # concat.shape = (B, L, 2)
         res = concat.reshape((concat.shape[0], concat.shape[1] * concat.shape[2])).T
         begining_blanks = tensor.ones((1, res.shape[1]), dtype=floatX)
         queryseq_mask_padded = tensor.concatenate([begining_blanks, res], axis=0)
     else:
         queryseq_mask_padded = None
     return queryseq_padded, queryseq_mask_padded
Developer: DingKe | Project: Precise-CTC | Lines: 25 | Source: ctc_theano.py
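As the docstring says, a label sequence of length L becomes a blank-interleaved sequence of length 2L+1 — the standard CTC target layout. A hypothetical call, assuming _pad_blanks is reachable as a plain (static) function and using float inputs so all dtypes match:

import numpy as np
import theano
import theano.tensor as T

queryseq = T.matrix('queryseq')                     # shape (L, B)
padded, _ = _pad_blanks(queryseq, blank_symbol=0.0)
f = theano.function([queryseq], padded)

labels = np.array([[3.0], [7.0], [5.0]], dtype=theano.config.floatX)  # L=3, B=1
print(f(labels).ravel())  # [0. 3. 0. 7. 0. 5. 0.]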


Example 20: create_TrainFunc_tranPES

def create_TrainFunc_tranPES(simfn, embeddings,  marge=0.5, alpha=1., beta=1.):

    # parse the embedding data
    embedding = embeddings[0] # D x N matrix
    lembedding = embeddings[1]

    # declare the symbolic variables for training triples
    hp = S.csr_matrix('head positive') # N x batchsize matrix
    rp = S.csr_matrix('relation')
    tp = S.csr_matrix('tail positive')

    hn = S.csr_matrix('head negative')
    tn = S.csr_matrix('tail negative')

    lemb = T.scalar('embedding learning rate')
    lremb = T.scalar('relation learning rate')

    subtensorE = T.ivector('batch entities set')
    subtensorR = T.ivector('batch link set')

    # Generate the training positive and negative triples
    hpmat = S.dot(embedding.E, hp).T #  batchsize x D dense matrix
    rpmat = S.dot(lembedding.E, rp).T
    tpmat = S.dot(embedding.E, tp).T

    hnmat = S.dot(embedding.E, hn).T
    tnmat = S.dot(embedding.E, tn).T

    # calculate the score
    pos = tranPES3(simfn, T.concatenate([hpmat, tpmat], axis=1).reshape((hpmat.shape[0], 2, hpmat.shape[1])).dimshuffle(0, 2, 1), hpmat, rpmat, tpmat)


    negh = tranPES3(simfn, T.concatenate([hnmat, tpmat], axis=1).reshape((hnmat.shape[0], 2, hnmat.shape[1])).dimshuffle(0, 2, 1), hnmat, rpmat, tpmat)
    negt = tranPES3(simfn, T.concatenate([hpmat, tnmat], axis=1).reshape((hpmat.shape[0], 2, hpmat.shape[1])).dimshuffle(0, 2, 1), hpmat, rpmat, tnmat)

    costh, outh = margeCost(pos, negh, marge)
    costt, outt = margeCost(pos, negt, marge)

    embreg = regEmb(embedding, subtensorE, alpha)
    lembreg = regLink(lembedding, subtensorR, beta)
    

    cost = costh + costt + embreg[0] + lembreg
    out = T.concatenate([outh, outt])
    outc = embreg[1]

    # list of inputs to the function
    list_in = [lemb, lremb, hp, rp, tp, hn, tn, subtensorE, subtensorR]

    # updating the embeddings using gradient descend
    emb_grad = T.grad(cost, embedding.E)
    New_embedding = embedding.E - lemb*emb_grad

    remb_grad = T.grad(cost, lembedding.E)
    New_rembedding = lembedding.E - lremb * remb_grad

    updates = OrderedDict({embedding.E: New_embedding, lembedding.E: New_rembedding})

    return theano.function(list_in, [cost, T.mean(out), T.mean(outc), embreg[0], lembreg],
                          updates=updates, on_unused_input='ignore')
Developer: while519 | Project: tranpes | Lines: 60 | Source: model.py



Note: the theano.tensor.concatenate examples in this article were compiled by 纯净天空 from GitHub/MSDocs and other source-code and documentation platforms; the snippets are drawn from open-source projects contributed by their authors. Copyright of the source code remains with the original authors. Consult each project's license before use or redistribution, and do not reproduce without permission.

