Python sparse.dot Function Code Examples


This article collects and summarizes typical usage examples of the theano.sparse.dot function in Python. If you have been wondering what exactly the dot function does, how it is used, or what real uses of dot look like, the curated examples below should help.



The following presents 20 code examples of the dot function, ordered by popularity.
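Before diving into the examples, here is a minimal, self-contained sketch of the core call (the names and sizes are ours, chosen for illustration): theano.sparse.dot takes a symbolic sparse matrix and a dense matrix and returns a dense result.

import numpy as np
import scipy.sparse as sp
import theano
import theano.sparse as S
import theano.tensor as T

x = S.csr_matrix('x', dtype=theano.config.floatX)  # symbolic sparse input
W = T.matrix('W')                                  # dense matrix
y = S.dot(x, W)                                    # sparse x dense -> dense

f = theano.function([x, W], y)
x_val = sp.csr_matrix(np.eye(3, dtype=theano.config.floatX))
W_val = np.arange(9, dtype=theano.config.floatX).reshape(3, 3)
print f(x_val, W_val)  # identity input, so the output equals W_val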

Example 1: __init__

 def __init__(self, rng, P_input, L2_input, **kwargs):
     #symbol declaration, initialization and definition
     x_1_tm1, x_t = (
             sparse.csr_matrix("x_1_tm1", dtype=theano.config.floatX),
             sparse.csr_matrix("x_t", dtype=theano.config.floatX)
         ) if P_input is None else P_input[:2]
     
     #elements of history
     shape = kwargs.get("shape")
     if shape is not None:
         dict_size = shape[0]
          if len(shape) <= 1:
              del kwargs["shape"]
          else:
              kwargs["shape"] = shape[1:]
     else:
         dict_size = (16,1,32,32)
     D_1_tm1 = theano.shared(rng.normal(size=dict_size).astype(theano.config.floatX))        
     Dx_1_tm1 = sparse.dot(x_1_tm1, D_1_tm1)#array access=dot operation      
     super(SequenceCNN, self).__init__(rng=rng, inputsymbol=Dx_1_tm1, **kwargs)#attaches new elements into the fgraph
     self.L2_output_1_tm1 = self.L2_output
     
     #elements of current time
     D_t = theano.shared(rng.normal(size=dict_size).astype(theano.config.floatX))        
     Dx_t = sparse.dot(x_t, D_t)#array access=dot operation
     self.L2_output_t = theano.clone(self.L2_output_1_tm1, replace={Dx_1_tm1:Dx_t})
     
     #element preparation for model building
     self.P_input = (x_1_tm1,x_t)
     self.params += [D_1_tm1, D_t]
     self.L2_output = self.L2_output_1_tm1*self.L2_output_t
Author: citihome, Project: utils, Lines: 32, Source: dnn.py
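Example 1 shares weights across the two time steps by calling theano.clone(..., replace=...), which rebuilds an expression graph with one input swapped for another. A minimal demonstration of that mechanism (toy variables, unrelated to the class above):

import numpy as np
import theano
import theano.tensor as T

a = T.vector('a')
expr = (a ** 2).sum()                        # some graph built on a
b = T.vector('b')
expr_b = theano.clone(expr, replace={a: b})  # same graph, now a function of b
f = theano.function([b], expr_b)
print f(np.array([1.0, 2.0], dtype=theano.config.floatX))  # prints 5.0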


Example 2: create_TrainFunc_tranPES

def create_TrainFunc_tranPES(simfn, embeddings,  marge=0.5, alpha=1., beta=1.):

    # parse the embedding data
    embedding = embeddings[0] # D x N matrix
    lembedding = embeddings[1]

    # declare the symbolic variables for training triples
    hp = S.csr_matrix('head positive') # N x batchsize matrix
    rp = S.csr_matrix('relation')
    tp = S.csr_matrix('tail positive')

    hn = S.csr_matrix('head negative')
    tn = S.csr_matrix('tail negative')

    lemb = T.scalar('embedding learning rate')
    lremb = T.scalar('relation learning rate')

    subtensorE = T.ivector('batch entities set')
    subtensorR = T.ivector('batch link set')

    # Generate the training positive and negative triples
    hpmat = S.dot(embedding.E, hp).T #  batchsize x D dense matrix
    rpmat = S.dot(lembedding.E, rp).T
    tpmat = S.dot(embedding.E, tp).T

    hnmat = S.dot(embedding.E, hn).T
    tnmat = S.dot(embedding.E, tn).T

    # calculate the score
    pos = tranPES3(simfn, T.concatenate([hpmat, tpmat], axis=1).reshape((hpmat.shape[0], 2, hpmat.shape[1])).dimshuffle(0, 2, 1), hpmat, rpmat, tpmat)


    negh = tranPES3(simfn, T.concatenate([hnmat, tpmat], axis=1).reshape((hnmat.shape[0], 2, hnmat.shape[1])).dimshuffle(0, 2, 1), hnmat, rpmat, tpmat)
    negt = tranPES3(simfn, T.concatenate([hpmat, tnmat], axis=1).reshape((hpmat.shape[0], 2, hpmat.shape[1])).dimshuffle(0, 2, 1), hpmat, rpmat, tnmat)

    costh, outh = margeCost(pos, negh, marge)
    costt, outt = margeCost(pos, negt, marge)

    embreg = regEmb(embedding, subtensorE, alpha)
    lembreg = regLink(lembedding, subtensorR, beta)
    

    cost = costh + costt + embreg[0] + lembreg
    out = T.concatenate([outh, outt])
    outc = embreg[1]

    # list of inputs to the function
    list_in = [lemb, lremb, hp, rp, tp, hn, tn, subtensorE, subtensorR]

    # updating the embeddings using gradient descent
    emb_grad = T.grad(cost, embedding.E)
    New_embedding = embedding.E - lemb*emb_grad

    remb_grad = T.grad(cost, lembedding.E)
    New_rembedding = lembedding.E - lremb * remb_grad

    updates = OrderedDict({embedding.E: New_embedding, lembedding.E: New_rembedding})

    return theano.function(list_in, [cost, T.mean(out), T.mean(outc), embreg[0], lembreg],
                          updates=updates, on_unused_input='ignore')
Author: while519, Project: tranpes, Lines: 60, Source: model.py
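A pattern that recurs in this and several later examples is S.dot(embedding.E, inp).T: when inp is a csr matrix whose columns are one-hot indicator vectors of shape (N, batchsize), the product is an embedding lookup that pulls out one column of the D x N matrix E per example. A self-contained sketch of just this step (the sizes and names are assumptions for illustration):

import numpy as np
import scipy.sparse as sp
import theano
import theano.sparse as S

D, N, batchsize = 4, 6, 3  # assumed toy sizes
E = theano.shared(np.random.randn(D, N).astype(theano.config.floatX), name='E')
inp = S.csr_matrix('inp')  # one-hot columns, shape (N, batchsize)
lookup = S.dot(E, inp).T   # dense (batchsize, D) matrix of embeddings

f = theano.function([inp], lookup)
idx = np.array([0, 2, 5])  # entities to look up
one_hot = sp.csr_matrix((np.ones(len(idx), dtype=theano.config.floatX),
                         (idx, np.arange(len(idx)))), shape=(N, batchsize))
print f(one_hot)           # equals E.get_value()[:, idx].T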


Example 3: fprop

    def fprop(self, state_below, add_noise=True):
        self.input_space.validate(state_below)

        if self.requires_reformat:
            if not isinstance(state_below, tuple):
                for sb in get_debug_values(state_below):
                    if sb.shape[0] != self.dbm.batch_size:
                        raise ValueError("self.dbm.batch_size is %d but got shape of %d" % (self.dbm.batch_size, sb.shape[0]))
                    assert reduce(lambda x,y: x * y, sb.shape[1:]) == self.input_dim

            state_below = self.input_space.format_as(state_below, self.desired_space)
        
        self.x = state_below
        
        # linear part
        if isinstance(self.x, S.SparseVariable):
            z = S.dot(self.x,self.W[0]) + self.b[0]
        else:
            z = T.dot(self.x,self.W[0]) + self.b[0]
        
        self.z = self.activate(z, self.expert_activation)
        
        # first layer non-linear part
        if isinstance(self.x, S.SparseVariable):
            h = S.dot(self.x,self.W[1]) + self.b[1]
        else:
            h = T.dot(self.x,self.W[1]) + self.b[1]
        
        # activate hidden units of non-linear part
        self.h = self.activate(h, self.hidden_activation)
            
        noise = 0.
        if add_noise:
            rng = MRG_RandomStreams(self.mlp.rng.randint(2**15))
            noise = rng.normal(size = self.z.shape, 
                                    std=self.noise_stdev ,
                                    dtype=self.z.type.dtype) 
        
        # second layer non-linear part
        self.a = T.dot(self.h,self.W[2]) + self.b[2] + noise
        
        # activate non-linear part
        self.m_mean = self.activate(self.a, self.gater_activation)
        
        # how many are over 0:
        self.effective_sparsity = T.cast(T.gt(self.m_mean, 0), 
                                         theano.config.floatX).mean()
           
        # mix output of linear part with output of non-linear part
        self.p = self.m_mean * self.z
        
        if self.layer_name is not None:
            self.z.name = self.layer_name + '_z'
            self.h.name = self.layer_name + '_h'
            self.a.name = self.layer_name + '_a'
            self.m_mean.name = self.layer_name + '_m_mean'
            self.p.name = self.layer_name + '_p'
        
        return self.p
Author: jfsantos, Project: ift6266h14, Lines: 59, Source: conditional_gater.py


Example 4: ForwardFn

def ForwardFn(fnsim, embeddings, leftop, rightop, marge=1.0):
    """
    This function returns a theano function to perform a forward step,
    contrasting couples of positive and negative triplets. Members are given
    as sparse matrices. For one positive triplet there is one negative
    triplet.

    :param fnsim: similarity function (on theano variables).
    :param embeddings: an embeddings instance.
    :param leftop: class for the 'left' operator.
    :param rightop: class for the 'right' operator.
    :param marge: marge for the cost function.

    :note: this is useful for W_SABIE [Weston et al., IJCAI 2011]
    """
    embedding, relationl, relationr = parse_embeddings(embeddings)

    # inputs
    inpr = S.csr_matrix()
    inpl = S.csr_matrix()
    inpo = S.csr_matrix()
    inpln = S.csr_matrix()
    inprn = S.csr_matrix()
    inpon = S.csr_matrix()

    # graph
    lhs = S.dot(embedding.E, inpl).T
    rhs = S.dot(embedding.E, inpr).T
    rell = S.dot(relationl.E, inpo).T
    relr = S.dot(relationr.E, inpo).T
    lhsn = S.dot(embedding.E, inpln).T
    rhsn = S.dot(embedding.E, inprn).T
    relln = S.dot(relationl.E, inpon).T
    relrn = S.dot(relationr.E, inpon).T
    simi = fnsim(leftop(lhs, rell), rightop(rhs, relr))
    simin = fnsim(leftop(lhsn, relln), rightop(rhsn, relrn))
    cost, out = margincost(simi, simin, marge)
    """
    Theano function inputs.
    :input inpl: sparse csr matrix representing the indexes of the positive
                 triplet 'left' member, shape=(#examples,N [Embeddings]).
    :input inpr: sparse csr matrix representing the indexes of the positive
                 triplet 'right' member, shape=(#examples,N [Embeddings]).
    :input inpo: sparse csr matrix representing the indexes of the positive
                 triplet relation member, shape=(#examples,N [Embeddings]).
    :input inpln: sparse csr matrix representing the indexes of the negative
                  triplet 'left' member, shape=(#examples,N [Embeddings]).
    :input inprn: sparse csr matrix representing the indexes of the negative
                  triplet 'right' member, shape=(#examples,N [Embeddings]).
    :input inpon: sparse csr matrix representing the indexes of the negative
                  triplet relation member, shape=(#examples,N [Embeddings]).

    Theano function output.
    :output out: binary vector representing when the margin is violated, i.e.
                 when an update occurs.
    """
    return theano.function([inpl, inpr, inpo,
                           inpln, inprn, inpon], [out],
                           on_unused_input='ignore')
Author: DevSinghSachan, Project: SME, Lines: 59, Source: model.py


Example 5: SimFn

def SimFn(fnsim, embeddings, leftop, rightop):
    """
    This function returns a Theano function to measure the similarity score
    for sparse matrices inputs.

    :param fnsim: similarity function (on Theano variables).
    :param embeddings: an Embeddings instance.
    :param leftop: class for the 'left' operator.
    :param rightop: class for the 'right' operator.
    """
    embedding, relationl, relationr = parse_embeddings(embeddings)

    # Inputs
    inpr = S.csr_matrix('inpr')
    inpl = S.csr_matrix('inpl')
    inpo = S.csr_matrix('inpo')
    # Graph
    # what is T? Are these tensors? lhs, rhs, rell, relr
    # we just created inpl, inpr and inpo. what does it mean to calculate the dot product?
    lhs = S.dot(embedding.E, inpl).T
    rhs = S.dot(embedding.E, inpr).T
    rell = S.dot(relationl.E, inpo).T
    relr = S.dot(relationr.E, inpo).T
    
    # what is this?
    #ref:
    #leftop = LayerMat('lin', state.ndim, state.nhid)
    #rightop = LayerMat('lin', state.ndim, state.nhid)
    # on call 
    #ry = y.reshape((y.shape[0], self.n_inp, self.n_out))
    #rx = x.reshape((x.shape[0], x.shape[1], 1))
    #return self.act((rx * ry).sum(1))
    
    simi = fnsim(leftop(lhs, rell), rightop(rhs, relr))
    """
    Theano function inputs.
    :input inpl: sparse csr matrix (representing the indexes of the 'left'
                    entities), shape=(#examples, N [Embeddings]).
    :input inpr: sparse csr matrix (representing the indexes of the 'right'
                    entities), shape=(#examples, N [Embeddings]).
    :input inpo: sparse csr matrix (representing the indexes of the
                    relation member), shape=(#examples, N [Embeddings]).

    Theano function output
    :output simi: matrix of score values.
    """
    return theano.function([inpl, inpr, inpo], [simi],
            on_unused_input='ignore')
Author: ehsankddm, Project: thesis, Lines: 48, Source: model.py


Example 6: get_train_function

	def get_train_function(self):
		# specify the computational graph
		target = T.matrix('target')
		weight = theano.shared(np.random.randn(len(self.feature_map), len(self.label_map)), name='weight')
		feat_mat = sparse.csr_matrix(name='feat_mat')
		mask_mat = sparse.csr_matrix(name='mask_mat')
		sum_pred = sparse.dot( mask_mat, T.nnet.softmax( sparse.dot(feat_mat, weight) ) )
		pred = sum_pred / sum_pred.sum(axis=1).reshape((sum_pred.shape[0], 1))
		objective = T.nnet.categorical_crossentropy(pred, target).sum() + self.param.l2_regularization * (weight ** 2).sum()
		grad_weight = T.grad(objective, weight)

		# print 'Compiling function ...'
		# compile the function
		train = theano.function(inputs = [feat_mat, mask_mat, target], outputs = [objective, weight], updates=[(weight, weight - 0.1*grad_weight)] )

		return train
Author: ShiyanYan, Project: gelearn, Lines: 16, Source: theano_learner.py


Example 7: labelFunct

 def labelFunct(self, batchSize, xFeats):
     #  xFeats [l, h]
     # l = batchSize
     # self.W = theano.printing.Print("W ") (self.W)
     # self.Wb = theano.printing.Print("Wb ") (self.Wb)
     scores = sparse.dot(xFeats, self.W) + self.Wb  # [l, h] x [h, r] => [l, r]
     relationProbs = T.nnet.softmax(scores)
     # scores = theano.printing.Print("scores ") (scores)
     labels = T.argmax(scores, axis=1)  #  [l, r] => [l]
     # labels = theano.printing.Print("labels ") (labels)
     return (labels, relationProbs)
Author: Simon-X, Project: relation-autoencoder, Lines: 11, Source: RelationClassifier.py


Example 8: _get_diagonal_term

    def _get_diagonal_term(self, X_left, X_right, diag_init):
        diag = tn.shared(value=diag_init, name='diag')

        if _tn_is_sparse(X_left) or _tn_is_sparse(X_right):
            XlXr = tsp.mul(X_left, X_right)
            y_pred = tsp.dot(XlXr, diag)
        else:
            XlXr = T.mul(X_left, X_right)
            y_pred = T.dot(XlXr, diag)

        return y_pred, [diag]
Author: vene, Project: bilearn, Lines: 11, Source: sg_theano.py


Example 9: get_train_function

	def get_train_function(self):
		# specify the computational graph
		weight = theano.shared(np.random.randn(len(self.feature_map), len(self.label_map)), name='weight')
		# weight = theano.shared(np.zeros((len(self.feature_map), len(self.label_map))), name='weight')
		feat_mat = sparse.csr_matrix(name='feat_mat')

		f_target = T.matrix('f_target')
		f_mask_mat = sparse.csr_matrix(name='f_mask_mat')
		f_sum_pred = sparse.dot( f_mask_mat, T.nnet.softmax( sparse.dot(feat_mat, weight) ) )
		f_pred = f_sum_pred / f_sum_pred.sum(axis=1).reshape((f_sum_pred.shape[0], 1))

		i_target = T.matrix('i_target')
		i_mask_mat = sparse.csr_matrix(name='l_mask_mat')
		i_pred = sparse.dot( i_mask_mat, T.nnet.softmax( sparse.dot(feat_mat, weight) ) )

		objective = self.param.feature_lambda * T.nnet.categorical_crossentropy(f_pred, f_target).sum() + T.nnet.categorical_crossentropy(i_pred, i_target).sum() + self.param.l2_lambda * (weight ** 2).sum() / 2
		grad_weight = T.grad(objective, weight)

		# print 'Compiling function ...'
		# compile the function
		train = theano.function(inputs = [feat_mat, f_mask_mat, f_target, i_mask_mat, i_target], outputs = [objective, weight], updates=[(weight, weight - 0.1*grad_weight)] )

		return train
Author: lookatmoon, Project: gelearn, Lines: 23, Source: theano_learner_v2.py


Example 10: SimFn

def SimFn(fnsim, embeddings, leftop, rightop, op=''):
    """
    This function returns a Theano function to measure the similarity score for sparse matrices inputs.

    :param fnsim: similarity function (on Theano variables).
    :param embeddings: an Embeddings instance.
    :param leftop: class for the 'left' operator.
    :param rightop: class for the 'right' operator.
    """

    embedding, relationl, relationr = parse_embeddings(embeddings)

    # Inputs

    inpr, inpl, inpo = S.csr_matrix('inpr'), S.csr_matrix('inpl'), S.csr_matrix('inpo')

    # Graph
    lhs = S.dot(embedding.E, inpl).T
    rhs = S.dot(embedding.E, inpr).T

    rell = S.dot(relationl.E, inpo).T
    relr = S.dot(relationr.E, inpo).T

    lop, rop = leftop(lhs, rell), rightop(rhs, relr)

    simi = fnsim(lop, rop)

    """
    Theano function inputs.
    :input inpl: sparse csr matrix (representing the indexes of the 'left' entities), shape=(#examples, N [Embeddings]).
    :input inpr: sparse csr matrix (representing the indexes of the 'right' entities), shape=(#examples, N [Embeddings]).
    :input inpo: sparse csr matrix (representing the indexes of the relation member), shape=(#examples, N [Embeddings]).

    Theano function output
    :output simi: matrix of score values.
    """
    return theano.function([inpl, inpr, inpo], [simi], on_unused_input='ignore')
Author: pminervini, Project: ebemkg, Lines: 37, Source: learning.py


Example 11: compRelationProbsFunc

    def compRelationProbsFunc(self, xFeats):
        #  xFeats [l, h] matrix
        # xFeats = theano.printing.Print("xFeats")(xFeats)
        # self.Wb = theano.printing.Print("Wb ") (self.Wb)
        # self.W = theano.printing.Print("W ") (self.W)
        # scores of each role by a classifier
        relationScores = sparse.dot(xFeats, self.W) + self.Wb   # [l, h] x [h, r] => [l, r]
        #relationScores = theano.printing.Print("relationScores=")(relationScores)

        # convert it to probabilities
        relationProbs = T.nnet.softmax(relationScores)
        #relationProbs = theano.printing.Print("relationProbs = ")(relationProbs)


        return relationProbs  # [l, r]
Author: Simon-X, Project: relation-autoencoder, Lines: 15, Source: RelationClassifier.py
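Examples 7 and 11 are two methods of the same sparse softmax classifier. For reference, a stand-alone version of the scoring path might look like the following (the shapes l, h, r and the shared variables are assumptions made for this sketch):

import numpy as np
import scipy.sparse as sp
import theano
import theano.sparse as sparse
import theano.tensor as T

l, h, r = 5, 8, 3  # assumed batch size, feature count, relation count
W = theano.shared(np.random.randn(h, r).astype(theano.config.floatX), name='W')
Wb = theano.shared(np.zeros(r, dtype=theano.config.floatX), name='Wb')

xFeats = sparse.csr_matrix('xFeats', dtype=theano.config.floatX)
scores = sparse.dot(xFeats, W) + Wb  # [l, h] x [h, r] => [l, r]
relationProbs = T.nnet.softmax(scores)
labels = T.argmax(scores, axis=1)
f = theano.function([xFeats], [labels, relationProbs])

x_val = sp.random(l, h, density=0.3, format='csr', dtype=theano.config.floatX)
labels_val, probs_val = f(x_val)  # [l] labels and [l, r] probabilities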


Example 12: get_output_for

    def get_output_for(self, input, **kwargs):
        if input.ndim > 2:
            # if the input has more than two dimensions, flatten it into a
            # batch of feature vectors.
            input = input.flatten(2)

        # According to pull request 595 from eduardo4jesus: even when the
        # input is sparse, the activation stays dense, since the weights
        # form a dense matrix.

        if (type(input) == S.SparseVariable) or (type(input) == S.SparseConstant):
            activation = S.dot(input, self.W)
        else:
            activation = T.dot(input, self.W)
        if self.b is not None:
            activation = activation + self.b.dimshuffle('x', 0)
        return self.nonlinearity(activation)
Author: hmendozap, Project: Lasagne, Lines: 17, Source: dense.py


Example 13: __init__

 def __init__(self, rng, x, topic_num=100):
     
     #input
     L2_input = sparse.csr_matrix("x",dtype=theano.config.floatX)
     #params
     vocab_size = x.shape[1]
     mu, sigma = x.data.mean(), x.data.var()**0.5
     
     rng = numpy.random.RandomState(numpy.random.randint(2**32-1)) if rng is None else rng
     self.L2_w = theano.shared(\
         numpy.asarray(\
             rng.normal(loc=mu,scale=sigma,size=(vocab_size, topic_num)),\
             dtype=theano.config.floatX\
         ),\
         borrow=True\
     )
     self.L2_b = theano.shared(numpy.zeros(topic_num,dtype=theano.config.floatX), borrow=True)
     self.params = [self.L2_w, self.L2_b]
     
     #stick-breaking: sticks -> orthogonal sticks
     L2_stick = sparse.dot(L2_input,self.L2_w)+self.L2_b-\
         0.5*(L2_input.size/vocab_size*tensor.sum(self.L2_w**2,0)+self.L2_b**2)  
     zero_space = tensor.zeros((L2_input.shape[0],1),dtype=theano.config.floatX)
     L2_orth_stick = tensor.join(1, L2_stick, zero_space)\
         - tensor.join(1, zero_space, tensor.cumsum(L2_stick,1))
     Pasterik_orth_stick = tensor.log(1 + tensor.exp(L2_orth_stick))      
     #training model definition
     Likelihood = tensor.mean(Pasterik_orth_stick)
     grads = theano.grad(Likelihood, self.params)#gradient w.r.t params
     eta = tensor.scalar("eta")
     updates = [(param, param+eta*grad) for param, grad in zip(self.params, grads)]
     self._fit = theano.function(\
         inputs=[L2_input, eta],\
         outputs=Likelihood,\
         updates=updates\
     )
     #predict model definition
     self._predict = theano.function(\
         inputs=[L2_input],\
         outputs=tensor.argmax(L2_stick,axis=-1)\
     )
     self._codec = theano.function(\
         inputs=[L2_input],\
         outputs=L2_stick>0\
     )
Author: citihome, Project: nlp, Lines: 45, Source: dual_hdp_theano.py


Example 14: __init__

 def __init__(self, rng, x, topic_num=100):
     #input
     L2_input = sparse.csr_matrix("x",dtype=theano.config.floatX)
     #params
     vocab_size = x.shape[1]
     mu, sigma = x.data.mean(), 2.56*x.data.var()**0.5
     
     rng = numpy.random.RandomState(numpy.random.randint(2**32-1)) if rng is None else rng
     self.L2_w = theano.shared(\
         numpy.asarray(\
             mu + (mu if mu < sigma else sigma)*rng.uniform(low=-1,high=1,size=(vocab_size, topic_num)),\
             dtype=theano.config.floatX\
         ),\
         borrow=True\
     )
     self.L2_b = theano.shared(numpy.zeros(topic_num, dtype=theano.config.floatX), borrow=True)
     
     self.params = [self.L2_w, self.L2_b]
     #output
     L2_topic = sparse.dot(L2_input,self.L2_w)+self.L2_b
             
     #difference based objective function
     Pasterik_topic = tensor.log(tensor.sum(tensor.exp(L2_topic-L2_topic.max(-1, keepdims=True)),-1))#avoiding overflow
     d_xw_w2 = tensor.mean(Pasterik_topic) -\
         0.5*(L2_input.size*tensor.mean(self.L2_w*self.L2_w)+tensor.dot(self.L2_b,self.L2_b))
     grads = theano.grad(d_xw_w2, self.params)#gradient w.r.t params
     eta = tensor.scalar("eta")
     updates = [(param, param+eta*grad) for param, grad in zip(self.params, grads)]
     #training model definition
     self._fit = theano.function(\
         inputs=[L2_input, eta],\
         outputs=d_xw_w2, \
         updates=updates\
     )
     #predict model definition
     self._predict = theano.function(\
         inputs=[L2_input],\
         outputs=tensor.argmax(L2_topic,axis=-1)\
     )       
Author: citihome, Project: nlp, Lines: 39, Source: dual_lda_theano.py


Example 15: TrainFn

def TrainFn(fnsim, embeddings, leftop, rightop, marge=1.0):
    """
    This function returns a theano function to perform a training iteration,
    contrasting couples of positive and negative triplets. Members are given
    as sparse matrices. For one positive triplet there is one negative
    triplet.

    :param fnsim: similarity function (on theano variables).
    :param embeddings: an embeddings instance.
    :param leftop: class for the 'left' operator.
    :param rightop: class for the 'right' operator.
    :param marge: marge for the cost function.
    """
    embedding, relationl, relationr = parse_embeddings(embeddings)
    # Inputs
    inpr = S.csr_matrix()
    inpl = S.csr_matrix()
    inpo = S.csr_matrix()
    inpln = S.csr_matrix()
    inprn = S.csr_matrix()
    inpon = S.csr_matrix()
    lrparams = T.scalar('lrparams')
    lrembeddings = T.scalar('lrembeddings')

    # Graph
    ## Positive triplet
    lhs = S.dot(embedding.E, inpl).T
    rhs = S.dot(embedding.E, inpr).T
    rell = S.dot(relationl.E, inpo).T
    relr = S.dot(relationr.E, inpo).T
    simi = fnsim(leftop(lhs, rell), rightop(rhs, relr))
    ## Negative triplet
    lhsn = S.dot(embedding.E, inpln).T
    rhsn = S.dot(embedding.E, inprn).T
    relln = S.dot(relationl.E, inpon).T
    relrn = S.dot(relationr.E, inpon).T
    simin = fnsim(leftop(lhsn, relln), rightop(rhsn, relrn))

    cost, out = margincost(simi, simin, marge)
    # Parameters gradients
    if hasattr(fnsim, 'params'):
        # If the similarity function has some parameters, we update them too.
        gradientsparams = T.grad(cost,
            leftop.params + rightop.params + fnsim.params)
        updates = OrderedDict((i, i - lrparams * j) for i, j in zip(
            leftop.params + rightop.params + fnsim.params, gradientsparams))
    else:
        gradientsparams = T.grad(cost, leftop.params + rightop.params)
        updates = OrderedDict((i, i - lrparams * j) for i, j in zip(
            leftop.params + rightop.params, gradientsparams))
    # Embeddings gradients
    gradients_embedding = T.grad(cost, embedding.E)
    newE = embedding.E - lrembeddings * gradients_embedding
    updates.update({embedding.E: newE})
    if type(embeddings) == list:
        # If there are different embeddings for the relation member.
        gradients_embedding = T.grad(cost, relationl.E)
        newE = relationl.E - lrparams * gradients_embedding
        updates.update({relationl.E: newE})
        gradients_embedding = T.grad(cost, relationr.E)
        newE = relationr.E - lrparams * gradients_embedding
        updates.update({relationr.E: newE})
    """
    Theano function inputs.
    :input lrembeddings: learning rate for the embeddings.
    :input lrparams: learning rate for the parameters.
    :input inpl: sparse csr matrix representing the indexes of the positive
                 triplet 'left' member, shape=(#examples,N [Embeddings]).
    :input inpr: sparse csr matrix representing the indexes of the positive
                 triplet 'right' member, shape=(#examples,N [Embeddings]).
    :input inpo: sparse csr matrix representing the indexes of the positive
                 triplet relation member, shape=(#examples,N [Embeddings]).
    :input inpln: sparse csr matrix representing the indexes of the negative
                  triplet 'left' member, shape=(#examples,N [Embeddings]).
    :input inprn: sparse csr matrix representing the indexes of the negative
                  triplet 'right' member, shape=(#examples,N [Embeddings]).
    :input inpon: sparse csr matrix representing the indexes of the negative
                  triplet relation member, shape=(#examples,N [Embeddings]).

    Theano function output.
    :output mean(cost): average cost.
    :output mean(out): ratio of examples for which the margin is violated,
                       i.e. for which an update occurs.
    """
    return theano.function([lrembeddings, lrparams, inpl, inpr, inpo,
                           inpln, inprn, inpon],
                           [T.mean(cost), T.mean(out)], updates=updates,
                           on_unused_input='ignore')
Author: DevSinghSachan, Project: SME, Lines: 88, Source: model.py


Example 16: sparse_slice_rows

def sparse_slice_rows(H, idx):
    '''Returns a dense slice H[idx, :]'''

    vecs = to_one_hot(idx, H.shape[0], dtype=H.dtype)

    return ts.dot(vecs, H)
Author: bmcfee, Project: playlist_recommender, Lines: 6, Source: shyrp.py
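The helper to_one_hot is not shown in this snippet, but the identity it relies on, that multiplying by a one-hot indicator matrix slices rows, is easy to check with plain scipy (a sketch independent of the original helper):

import numpy as np
import scipy.sparse as sp

H = np.arange(12.0).reshape(4, 3)
idx = np.array([2, 0])
rows = np.arange(len(idx))
vecs = sp.csr_matrix((np.ones(len(idx)), (rows, idx)),
                     shape=(len(idx), H.shape[0]))
print vecs.dot(H)  # identical to the dense slice H[idx, :]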


Example 17: theano_safe_sparse_dot

def theano_safe_sparse_dot(X, Y):
    if _tn_is_sparse(X) or _tn_is_sparse(Y):
        return tsp.dot(X, Y)
    else:
        return T.dot(X, Y)
Author: vene, Project: bilearn, Lines: 5, Source: sg_theano.py
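_tn_is_sparse is likewise not shown. A plausible reconstruction (our assumption, not the project's actual code) checks the variable's Theano type; the dispatch matters because T.dot does not accept sparse variables, while tsp.dot requires at least one sparse argument:

import theano.sparse as tsp

def _tn_is_sparse(X):
    # assumed helper: true when X is a Theano sparse variable
    return isinstance(X.type, tsp.SparseType)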


Example 18: RankRelFn

def RankRelFn(fnsim, embeddings, leftop, rightop,
              subtensorspec=None, adding=False):
    """
    This function returns a Theano function to measure the similarity score of
    all relation entities given couples of 'right' and 'left' entities (as
    sparse matrices).

    :param fnsim: similarity function (on Theano variables).
    :param embeddings: an Embeddings instance.
    :param leftop: class for the 'left' operator.
    :param rightop: class for the 'right' operator.
    :param subtensorspec: only measure the similarity score for the entities
                          corresponding to the first subtensorspec (int)
                          entities of the embedding matrix (default None: all
                          entities)
    :param adding: if the right member is composed of several entities, the
                   function needs two more inputs: we have to add the embedding
                   values of the other entities (with the appropriate scaling
                   factor to perform the mean pooling).
    """
    embedding, relationl, relationr = parse_embeddings(embeddings)

    # Inputs
    inpr = S.csr_matrix('inpr')
    inpl = S.csr_matrix('inpl')
    if adding:
        inpoadd = S.csr_matrix('inpoadd')
        scal = T.scalar('scal')
    # Graph
    if subtensorspec is None:
        rell = relationl.E
        relr = relationr.E
    else:
        # We compute the score only for a subset of entities
        rell = relationl.E[:, :subtensorspec].T
        relr = relationr.E[:, :subtensorspec].T
    if adding:
        # Add the embeddings of the other entities (mean pooling)
        rell = rell * scal + (S.dot(relationl.E, inpoadd).T).reshape(
                (1, embedding.D))
        relr = relr * scal + (S.dot(relationr.E, inpoadd).T).reshape(
                (1, embedding.D))
    lhs = (S.dot(embedding.E, inpl).T).reshape((1, embedding.D))
    rhs = (S.dot(embedding.E, inpr).T).reshape((1, embedding.D))
    # hack to prevent a broadcast problem with the Bilinear layer
    if hasattr(leftop, 'forwardrankrel'):
        tmpleft = leftop.forwardrankrel(lhs, rell)
    else:
        tmpleft = leftop(lhs, rell)
    if hasattr(rightop, 'forwardrankrel'):
        tmpright = rightop.forwardrankrel(rhs, relr)
    else:
        tmpright = rightop(rhs, relr)
    simi = fnsim(tmpleft, tmpright)
    """
    Theano function inputs.
    :input inpl: sparse csr matrix representing the indexes of the 'left'
                 entities, shape=(#examples,N [Embeddings]).
    :input inpr: sparse csr matrix representing the indexes of the 'right'
                 entities, shape=(#examples,N [Embeddings]).
    :opt input inpoadd: sparse csr matrix representing the indexes of the
                        other entities of the relation member with the
                        appropriate scaling factor, shape = (#examples, N
                        [Embeddings]).
    :opt input scal: scaling factor to perform the mean: 1 / [#entities in the
                     member].

    Theano function output.
    :output simi: matrix of score values.
    """
    if not adding:
        return theano.function([inpl, inpr], [simi], on_unused_input='ignore')
    else:
        return theano.function([inpl, inpr, inpoadd, scal], [simi],
                on_unused_input='ignore')
Author: DevSinghSachan, Project: SME, Lines: 75, Source: model.py


Example 19: __init__

 def __init__(self, nodenet):
     if nodenet.sparse:
         self.propagate = theano.function([], [nodenet.w, nodenet.a], updates={nodenet.a: ST.dot(nodenet.w, nodenet.a)})
     else:
         self.propagate = theano.function([], [nodenet.w, nodenet.a], updates={nodenet.a: T.dot(nodenet.w, nodenet.a)})
Author: brucepro, Project: micropsi2, Lines: 5, Source: theano_stepoperators.py


Example 20: run

def run(jobman,debug = False):
    expstart = time.time()
    hp = jobman.state

    if not os.path.exists('files/'): os.mkdir('files/')

    # Symbolic variables
    s_posit = T.matrix()
    s_negat = T.matrix()
    idx_start = T.lscalar()
    idx_stop = T.lscalar()
    s_valid = theano.sparse.csr_matrix()



    w2i = cPickle.load(open('/mnt/scratch/bengio/bengio_group/data/gutenberg/merged_word2idx.pkl'))
    i2w = dict( (v,k) for k,v in w2i.iteritems() )
    i2w[0] = 'UNK'
    senna = [ i2w[i] for i in range(len(i2w.keys())) ]


    nsenna = len(senna)
    
    embedding = cae(i_size=nsenna, h_size=hp['embedsize'], e_act = identity)
    H = ae(i_size = hp['embedsize']*hp['wsize'], h_size=hp['hsize'], e_act = T.tanh)
    L = logistic(i_size = hp['hsize'], h_size = 1, act = identity)

    del H.params['d_bias']
    del embedding.params['d_bias']
    del embedding.params['e_bias']
    minsize = hp['minsize']
    maxsize = hp['maxsize']

    dsize = maxsize - minsize +1

    H.params['e_bias'] = theano.shared( numpy.array(numpy.zeros((dsize,hp['hsize'])),dtype=theano.config.floatX),name='e_bias')

    path = hp['loadpath']
 
    if path:
        load(embedding,path+'/embedding.pkl')
        #load(H,path+'/hidden.pkl')
        #load(L,path+'/logistic.pkl')
        hp['embedsize'] = embedding.params['e_weights'].get_value(borrow=True).shape[1]
        #hp['hsize'] = H.params['e_weights'].get_value(borrow=True).shape[1]
        jobman.save()

    H.params['e_bias'] = theano.shared( numpy.array(numpy.zeros((dsize,hp['hsize'])),dtype=theano.config.floatX),name='e_bias')
    valid_embedding = sparse.supervised.logistic(i_size=nsenna, h_size=hp['embedsize'], act = identity)
    valid_embedding.params['weights'] = sp.shared(value = scipy.sparse.csr_matrix(embedding.params['e_weights'].get_value(borrow=True)))


    lr = hp['lr']
    h_size = hp['hsize']
    bs = hp['bs']

    posit_embed = T.dot(s_posit, embedding.params['e_weights']).reshape((1,hp['embedsize']*hp['wsize']))
    negat_embed = T.dot(s_negat, embedding.params['e_weights']).reshape((hp['nneg'],hp['embedsize']*hp['wsize']))
    valid_embed = sp.dot(s_valid,valid_embedding.params['weights']).reshape((nsenna,hp['embedsize']*hp['wsize']))

    posit_embed_left = T.concatenate([posit_embed[:,idx_start*hp['embedsize']:idx_stop*hp['embedsize']],
                                  T.zeros_like(posit_embed[:,idx_stop*hp['embedsize']:]) ],axis=1)

    negat_embed_left = T.concatenate([negat_embed[:,idx_start*hp['embedsize']:idx_stop*hp['embedsize']],
                                   T.zeros_like(negat_embed[:,idx_stop*hp['embedsize']:]) ],axis=1)

    posit_embed_right = T.concatenate([ T.zeros_like(posit_embed[:,:idx_start*hp['embedsize']]),
                                  posit_embed[:,idx_start*hp['embedsize']:idx_stop*hp['embedsize']]],axis=1)

    negat_embed_right = T.concatenate([ T.zeros_like(negat_embed[:,:idx_start*hp['embedsize']]),
                                   negat_embed[:,idx_start*hp['embedsize']:idx_stop*hp['embedsize']]],axis=1)



    posit_embed = T.concatenate([ T.zeros_like(posit_embed[:,:idx_start*hp['embedsize']]),
                                  posit_embed[:,idx_start*hp['embedsize']:idx_stop*hp['embedsize']],
                                  T.zeros_like(posit_embed[:,idx_stop*hp['embedsize']:]) ],axis=1)

    negat_embed = T.concatenate([ T.zeros_like(negat_embed[:,:idx_start*hp['embedsize']]),
                                   negat_embed[:,idx_start*hp['embedsize']:idx_stop*hp['embedsize']],
                                   T.zeros_like(negat_embed[:,idx_stop*hp['embedsize']:]) ],axis=1)

    
    #posit_embed = ifelse(T.eq(idx_start, 0), posit_embed_left, posit_embed)
    #posit_embed = ifelse(T.eq(idx_stop, hp['maxsize']), posit_embed_right, posit_embed)

    #negat_embed = ifelse(T.eq(idx_start, 0), negat_embed_left, negat_embed)
    #negat_embed = ifelse(T.eq(idx_stop, hp['maxsize']), negat_embed_right, negat_embed)

    Hposit = T.tanh(T.dot(posit_embed,H.params['e_weights']) + H.params['e_bias'][idx_stop-idx_start-minsize,:])
    Hnegat = T.tanh(T.dot(negat_embed,H.params['e_weights']) + H.params['e_bias'][idx_stop-idx_start-minsize,:])
    posit_score = L.encode(Hposit)
    negat_score = L.encode(Hnegat)
    valid_score = L.encode(H.encode(valid_embed))

    C = (negat_score - posit_score.flatten() + hp['margin'])

    CC = (rect(C)).mean()

    opt = theano.function([s_posit, s_negat, idx_start, idx_stop],
#......... (part of the code omitted) .........
Author: srifai, Project: senna, Lines: 101, Source: gut_multibias_0001.py



Note: The theano.sparse.dot examples in this article were compiled by 纯净天空 from source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets are drawn from open-source projects contributed by their authors, and copyright remains with the original authors. Consult each project's License before distributing or using the code; do not repost without permission.

