
Python tensor.lvector Function Code Examples


This article collects typical usage examples of the theano.tensor.lvector function in Python. If you are wondering what lvector does, how to call it, or what real-world usage looks like, the hand-picked code examples below should help.



The following shows 20 code examples of the lvector function, sorted by popularity by default.
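Before the examples, here is a minimal standalone sketch (not taken from any of the projects below) of what lvector gives you: a symbolic one-dimensional tensor of dtype int64 ("l" for long), most often used as a vector of integer indices:

import numpy
import theano
import theano.tensor as T

# lvector() creates a symbolic 1-D int64 tensor.
idx = T.lvector('idx')
print(idx.dtype)  # int64
print(idx.ndim)   # 1

# Typical use: indexing rows of another tensor.
data = T.matrix('data')
f = theano.function([data, idx], data[idx])
print(f(numpy.eye(3), numpy.array([2, 0], dtype='int64')))
# [[ 0.  0.  1.]
#  [ 1.  0.  0.]]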

Example 1: __init__

	def __init__(self):
		self.name = self.__class__.__name__
		
		#Symbolic expressions for the prediction function (and compiled one too), the loss, the regularization, and the loss to
		#optimize (loss + lmbda * regul)
		#To be defined by the child classes:
		self.pred_func = None
		self.pred_func_compiled = None

		self.loss_func = None
		self.regul_func = None
		self.loss_to_opt = None
		
		#Symbolic variables for training values
		self.ys = TT.vector('ys')
		self.rows = TT.lvector('rows')
		self.cols = TT.lvector('cols')
		self.tubes = TT.lvector('tubes') 


		#Current values for which the loss is currently compiled
		#3 dimensions:
		self.n = 0 #Number of subject entities
		self.m = 0 #Number of relations
		self.l = 0 #Number of object entities
		#and rank:
		self.k = 0
		#and corresponding number of parameters (i.e. n*k + m*k + l*k for CP_Model)
		self.nb_params = 0
Developer: ttrouill, Project: complex, Lines: 29, Source: models.py


Example 2: test_random_integers_vector

    def test_random_integers_vector(self):
        random = RandomStreams(utt.fetch_seed())
        low = tensor.lvector()
        high = tensor.lvector()
        out = random.random_integers(low=low, high=high)
        assert out.ndim == 1
        f = function([low, high], out)

        low_val = [100, 200, 300]
        high_val = [110, 220, 330]
        seed_gen = numpy.random.RandomState(utt.fetch_seed())
        numpy_rng = numpy.random.RandomState(int(seed_gen.randint(2**30)))

        # Arguments of size (3,)
        val0 = f(low_val, high_val)
        numpy_val0 = numpy.asarray([numpy_rng.randint(low=lv, high=hv+1)
            for lv, hv in zip(low_val, high_val)])
        assert numpy.all(val0 == numpy_val0)

        # arguments of size (2,)
        val1 = f(low_val[:-1], high_val[:-1])
        numpy_val1 = numpy.asarray([numpy_rng.randint(low=lv, high=hv+1)
            for lv, hv in zip(low_val[:-1], high_val[:-1])])
        assert numpy.all(val1 == numpy_val1)

        # Specifying the size explicitly
        g = function([low, high], random.random_integers(low=low, high=high, size=(3,)))
        val2 = g(low_val, high_val)
        numpy_rng = numpy.random.RandomState(int(seed_gen.randint(2**30)))
        numpy_val2 = numpy.asarray([numpy_rng.randint(low=lv, high=hv+1)
            for lv, hv in zip(low_val, high_val)])
        assert numpy.all(val2 == numpy_val2)
        self.assertRaises(ValueError, g, low_val[:-1], high_val[:-1])
Developer: ChinaQuants, Project: Theano, Lines: 33, Source: test_shared_randomstreams.py


Example 3: predict_hidden

    def predict_hidden(self, dataset=None, batch_size=100):
        # compute number of minibatches for training, validation and testing
        n_test_batches = dataset.get_value(borrow=True).shape[0] / batch_size
        n_dim =  dataset.get_value(borrow=True).shape[1]

        # allocate symbolic variables for the data
        index = T.lvector()    # index to a [mini]batch
        index_hist = T.lvector()  # index to history

        #print hidden layer
        [pre_sigmoid_h1, h1_mean, h1_sample] = self.sample_h_given_v(self.input, self.input_history)

        print_hidden = theano.function(
            [index, index_hist],
            [h1_sample],
            givens={ self.input:dataset[index],
                     self.input_history:dataset[index_hist].reshape((batch_size, self.delay * self.n_visible))
                    },
            name='print_hidden'
            )

        # valid starting indices
        datasetindex = range(self.delay, dataset.get_value(borrow=True).shape[0])
        permindex = np.array(datasetindex)

        #For each frame in minibatch
        for batch_index in xrange(n_test_batches):
            data_idx = permindex[batch_index * batch_size : (batch_index + 1) * batch_size]
            hist_idx = np.array([data_idx - n for n in xrange(1, self.delay + 1)]).T
            for index_in_batch in range(batch_size) :
                print "Hidden CRBM (frame %d):" %(batch_index*batch_size+index_in_batch+1)
                print print_hidden(data_idx, hist_idx.ravel())[0][index_in_batch]
                print "-----------"
Developer: FrancoisLasson, Project: Temporal_DBN, Lines: 33, Source: DCRBM.py


Example 4: test_random_integers_vector

    def test_random_integers_vector(self):
        rng_R = random_state_type()
        low = tensor.lvector()
        high = tensor.lvector()
        post_r, out = random_integers(rng_R, low=low, high=high)
        assert out.ndim == 1
        f = compile.function([rng_R, low, high], [post_r, out],
                             accept_inplace=True)

        low_val = [100, 200, 300]
        high_val = [110, 220, 330]
        rng = numpy.random.RandomState(utt.fetch_seed())
        numpy_rng = numpy.random.RandomState(utt.fetch_seed())

        # Arguments of size (3,)
        rng0, val0 = f(rng, low_val, high_val)
        numpy_val0 = numpy.asarray([numpy_rng.random_integers(low=lv, high=hv)
            for lv, hv in zip(low_val, high_val)])
        assert numpy.all(val0 == numpy_val0)

        # arguments of size (2,)
        rng1, val1 = f(rng0, low_val[:-1], high_val[:-1])
        numpy_val1 = numpy.asarray([numpy_rng.random_integers(low=lv, high=hv)
            for lv, hv in zip(low_val[:-1], high_val[:-1])])
        assert numpy.all(val1 == numpy_val1)

        # Specifying the size explicitly
        g = compile.function([rng_R, low, high],
                random_integers(rng_R, low=low, high=high, size=(3,)),
                accept_inplace=True)
        rng2, val2 = g(rng1, low_val, high_val)
        numpy_val2 = numpy.asarray([numpy_rng.random_integers(low=lv, high=hv)
            for lv, hv in zip(low_val, high_val)])
        assert numpy.all(val2 == numpy_val2)
        self.assertRaises(ValueError, g, rng2, low_val[:-1], high_val[:-1])
Developer: SamuelZeng, Project: Theano, Lines: 35, Source: test_raw_random.py


Example 5: train_minibatch_fn

    def train_minibatch_fn(self, evaluate=False):
        """
        Initialize this Theano function once
        """
        X = T.lmatrix('X_train')
        L_x = T.lvector('L_X_train')

        Y = T.lmatrix('Y_train')
        L_y = T.lvector('L_y_train')

        learning_rate = T.dscalar('learning_rate')
        momentum = T.dscalar('momentum')
        weight_decay = T.dscalar('weight_decay')

        loss, accuracy = self.loss(X, L_x, Y, L_y, weight_decay)
        updates = self.get_sgd_updates(loss, learning_rate, momentum)

        outputs = [loss, accuracy]

        if evaluate:
            precision, recall = self.evaluate(X, L_x, Y, L_y)
            outputs = outputs + [precision, recall]

        return theano.function(
            inputs=[X, L_x, Y, L_y, learning_rate, momentum, weight_decay],
            outputs=outputs,
            updates=updates
        )
Developer: tivaro, Project: ULL-P2, Lines: 28, Source: end_to_end_model.py


Example 6: __theano_init__

  def __theano_init__(self):

    # Theano tensor for I/O 
    X = T.lmatrix('X')
    Y = T.lvector('Y')
    N = T.lvector('N')

    # network structure
    l_in = L.layers.InputLayer(shape=(self.batch_size, self.n_gram), input_var = X)
    l_we = L.layers.EmbeddingLayer(l_in, self.vocab_size, self.word_dim, W = self.D)
    l_f1 = L.layers.DenseLayer(l_we, self.hidden_dim1, W = self.C, b = self.Cb)
    l_f2 = L.layers.DenseLayer(l_f1, self.hidden_dim2, W = self.M, b = self.Mb)
    l_out = L.layers.DenseLayer(l_f2, self.vocab_size, W = self.E, b = self.Eb, nonlinearity=None)
    
    # lasagne.layers.get_output produces a variable for the output of the net
    O = L.layers.get_output(l_out) # (batch_size, vocab_size)

    lossfunc = NCE(self.batch_size, self.vocab_size, self.noise_dist, self.noise_sample_size)
    loss = lossfunc.evaluate(O, Y, N)
    # loss = T.nnet.categorical_crossentropy(O, Y).mean()

    # Retrieve all parameters from the network
    all_params = L.layers.get_all_params(l_out, trainable=True)

    # Compute Adadelta updates for training
    updates = L.updates.adadelta(loss, all_params)

    # Theano functions for training and computing cost
    self.train = theano.function([l_in.input_var, Y, N], loss, updates=updates, allow_input_downcast=True)
    self.compute_loss = theano.function([l_in.input_var, Y, N], loss, allow_input_downcast=True)
    self.weights = theano.function(inputs = [], outputs = [self.D, self.C, self.M, self.E, self.Cb, self.Mb, self.Eb])
Developer: shuoyangd, Project: nplm-theano, Lines: 31, Source: nplm.py


Example 7: recognize_dataset

    def recognize_dataset(self, dataset_test = None, seqlen=None, batch_size =100) :
        #Here, we don't ignore the first 6 frames because we want to test recognition performance on every frame of the test dataset
        n_test_batches = (dataset_test.get_value(borrow=True).shape[0]-(self.delay*self.freq)*len(seqlen)) / batch_size
        n_dim =  dataset_test.get_value(borrow=True).shape[1]

        # allocate symbolic variables for the data
        index = T.lvector()    # index to a [mini]batch
        index_hist = T.lvector()  # index to history

        input_log = T.nnet.sigmoid(T.dot(self.x, self.crbm_layer.W)+ T.dot(self.x_history, self.crbm_layer.B) + self.crbm_layer.hbias)
        prob = self.logLayer.p_y_given_x
        prediction = self.logLayer.y_pred

        print_prediction = theano.function(
            [index, index_hist],
            [prediction, prob],
            givens={ self.x:dataset_test[index],
                     self.x_history:dataset_test[index_hist].reshape((batch_size, self.delay * n_dim))
                    },
            name='print_prediction'
            )
        # valid starting indices
        datasetindex = range(self.delay*self.freq, dataset_test.get_value(borrow=True).shape[0])
        permindex = np.array(datasetindex)

        for batch_index in xrange(n_test_batches):
            data_idx = permindex[batch_index * batch_size : (batch_index + 1) * batch_size]
            hist_idx = np.array([data_idx - n*self.freq for n in xrange(1, self.delay + 1)]).T
            for index_in_batch in range(batch_size) :
                print "(frame %d):" %(batch_index*batch_size+index_in_batch+1)
                print "%% of recognition for each pattern : "
                print print_prediction(data_idx, hist_idx.ravel())[1][index_in_batch]
                print "So, recognized pattern is :"
                print print_prediction(data_idx, hist_idx.ravel())[0][index_in_batch]
                print "-----------"
Developer: FrancoisLasson, Project: Temporal_DBN, Lines: 35, Source: CRBMLogistic.py


Example 8: pretraining_functions

    def pretraining_functions(self, train_set_x, batch_size, k, layer=0, static=False, with_W=False, binary=False):
        """Creates functions for doing CD

        Generates a function for performing one step of
        gradient descent at a given layer. The function will require
        as input the minibatch index, and to train an RBM you just
        need to iterate, calling the corresponding function on all
        minibatch indexes.

        Args:
            train_set_x: Shared var. that contains all datapoints used
                                for training the RBM
            batch_size: int, the size of each minibatch
            k: number of Gibbs steps to do in CD-k / PCD-k
            layer: which layer of the dbn to generate functions for
            static: if True, ignore all temporal components
            with_W: Whether or not to include the W in update
            binary: if true, make visible layer binary
        Returns:
            CD function

        """
        # allocate symbolic variables for the data
        index = T.lvector()  # index to a [mini]batch
        index_hist = T.lvector()  # index to history
        lr = T.dscalar()

        rbm = self.rbm_layers[layer]
        rbm.binary = binary
        # get the cost and the gradient corresponding to one step of CD-15
        cost, updates = rbm.get_cost_updates(k=k, static=static, with_W=with_W)

        #################################
        #     Training the RBM         #
        #################################
        if static:
            # updates only on non-temporal components
            fn = theano.function(
                [index, lr],
                outputs=cost,
                updates=updates,
                givens={self.x: train_set_x[index], self.lr: lr},
                name="train_tarbm_static",
            )
        else:
            # updates including temporal components
            fn = theano.function(
                [index, index_hist, lr],
                outputs=cost,
                updates=updates,
                givens={
                    self.x: train_set_x[index],
                    self.x_hist: train_set_x[index_hist].reshape((batch_size, self.delay * np.prod(self.n_ins))),
                    self.lr: lr,
                },
                name="train_tarbm",
            )
        return fn
Developer: chausler, Project: deep, Lines: 58, Source: tadbn.py
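As the docstring explains, the returned function is called once per minibatch with index arrays (plus a learning rate in the temporal case). A hypothetical driver loop for the non-static case follows; the dbn and train_set_x names and the index bookkeeping (modeled on Examples 3 and 7) are assumptions, not code from the project:

import numpy as np

batch_size = 100
# assumed: `dbn` is an instance of the class above,
# `train_set_x` a Theano shared variable holding the data
train_fn = dbn.pretraining_functions(train_set_x, batch_size, k=1, layer=0)

n_frames = train_set_x.get_value(borrow=True).shape[0]
valid_starts = np.arange(dbn.delay, n_frames)          # frames with a full history
n_batches = len(valid_starts) // batch_size

for b in range(n_batches):
    data_idx = valid_starts[b * batch_size:(b + 1) * batch_size]
    # history indices: frames t-1 .. t-delay for every frame t in the batch
    hist_idx = np.array([data_idx - n for n in range(1, dbn.delay + 1)]).T
    cost = train_fn(data_idx, hist_idx.ravel(), 0.01)  # args: index, index_hist, lr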


Example 9: test

def test(model):
    dim = 128
    v_size = 7810
    margin = 1.0
    
    #load model
    f = open(model, 'rb')
    input_params = cPickle.load(f)
    emb, wx, wh, bh, wa = input_params
    f.close()
    
    embLayer = emb_layer(pre_train=emb, v = v_size, dim = dim) 
    rnnLayer = rnn_layer(input=None, wx=wx, wh=wh, bh=bh, emb_layer = embLayer, nh = dim) 
    att = attention_layer(input=None, rnn_layer=rnnLayer, margin = margin)

    q = T.lvector('q')
    a = T.lscalar('a')
    p = T.lvector('p')
    t = T.lscalar('t')
    inputs = [q,a,p,t]
    score = att.predict(inputs)
    pred = theano.function(inputs=inputs,outputs=score)

    pool = ThreadPool()

    f = open('./data/test-small.id','r')
    count = 1
    print 'time_b:%s' %time.clock()  
    to_pred = []
    for line in f:
        if count % 10000 == 0:
            print count / 10000
        count += 1
        #print 'time_b:%s' %time.clock()
        line = line[:-1]
        tmp = line.split('\t')
        in_q = numpy.array(tmp[0].split(' ')).astype(numpy.int) - 1
        in_a = int(tmp[1].split(' ')[2]) - 1
        in_p = numpy.array(tmp[1].split(' ')).astype(numpy.int) - 1
        in_t = int(tmp[2]) - 1
        lis = (in_q, in_a, in_p, in_t)
        to_pred.append(lis)
        #print 'time_load:%s' %time.clock()  
        #print 'time_score:%s' %time.clock()  
    f.close()

    ay = numpy.asarray(to_pred)
    #results = map(pred, list(ay[:,0]), list(ay[:,1]),list(ay[:,2]),list(ay[:,3]))
    results = pool.map(lambda inp: pred(*inp), to_pred)  # unpack each (q, a, p, t); the compiled Theano function takes positional args
    #results = []
    #for p in to_pred:
    #    results.append(att.predict(p,params))
    print 'time_e:%s' %time.clock()
    #print results
    pool.close()
    pool.join()
Developer: yzyz7, Project: attgit, Lines: 56, Source: batch_predict.py


Example 10: test

def test(model):
    dim = 128
    v_size = 7810
    margin = 1.0
    
    #load model
    f = open(model, 'rb')
    input_params = cPickle.load(f)
    emb, wx, wh, bh, wa = input_params
    f.close()
    
    embLayer = emb_layer(pre_train=emb, v = v_size, dim = dim) 
    rnnLayer = rnn_layer(input=None, wx=wx, wh=wh, bh=bh, emb_layer = embLayer, nh = dim) 
    att = attention_layer(input=None, rnn_layer=rnnLayer, margin = margin)

    q = T.lvector('q')
    a = T.lscalar('a')
    p = T.lvector('p')
    t = T.lscalar('t')
    inputs = [q,a,p,t]

    #emb_num = T.lscalar('emb_num')
    #nh = T.scalar('nh')
    #dim = T.scalar('dim')
    score = att.predict(inputs)
    pred = theano.function(inputs=inputs,outputs=score)

    

    wf = open('./data/res','w')
    f = open('./data/test.id','r')
    count = 1
    print 'time_b:%s' %time.clock()  
    for line in f:
        if count % 10000 == 0:
            print count / 10000
            print 'time_1w:%s' %time.clock()
        count += 1
        #print 'time_b:%s' %time.clock()  
        line = line[:-1]
        tmp = line.split('\t')
        in_q = numpy.array(tmp[0].split(' ')).astype(numpy.int) - 1
        #x = emb[q].reshape((q.shape[0], emb.shape[1]))
        in_a = int(tmp[1].split(' ')[2]) - 1
        in_p = numpy.array(tmp[1].split(' ')).astype(numpy.int) - 1
        in_t = int(tmp[2]) - 1
        #in_lis =  [in_q, in_a, in_p, in_t]
        #print 'time_load:%s' %time.clock()  
        s = pred(in_q, in_a, in_p, in_t)
        #print s
        wf.write(str(s) + '\n')
        #print 'time_score:%s' %time.clock()  
    f.close()
    wf.close()
Developer: yzyz7, Project: attgit, Lines: 54, Source: test_att.py


Example 11: test_no_reuse

def test_no_reuse():
    x = T.lvector()
    y = T.lvector()
    f = theano.function([x, y], x + y)

    #provide both inputs in the first call
    f(numpy.ones(10, dtype='int64'), numpy.ones(10, dtype='int64'))

    try:
        f(numpy.ones(10))
    except TypeError:
        return
    assert not 'should not get here'
Developer: Stan-ST-SUN, Project: Theano, Lines: 13, Source: test_gc.py


Example 12: run_mlp

def run_mlp(train_data, valid_data, valid_score, test_data, test_score, We_init, options):

    tmp = np.diag(np.ones(options.dim, dtype='float32'))
    W_init = np.asarray(np.concatenate((tmp, tmp), axis=0))

    g1batchindices = T.lvector(); g2batchindices = T.lvector()
    p1batchindices = T.lvector(); p2batchindices = T.lvector()

    # Create an instance of the MLP class
    mlp = Layer(We_init, W_init, T.tanh,  options.lamda_w, options.lamda_ww)

    #compute phrase vectors
    bigram_output = theano.function([g1batchindices, g2batchindices], mlp.output(g1batchindices, g2batchindices))

    cost = squared_error(mlp, g1batchindices, g2batchindices, p1batchindices, p2batchindices)

    cost = cost + mlp.word_reg

    updates = adagrad(cost, mlp.params, learning_rate=0.005, epsilon=1e-6)

    train_model = theano.function([g1batchindices, g2batchindices, p1batchindices, p2batchindices], cost, updates=updates)

    # compute number of minibatches for training
    batch_size = int(options.batchsize)
    n_train_batches = int(len(train_data) * 1.0 // batch_size)

    iteration = 0

    max_iteration = options.epochs

    while iteration < max_iteration:
        iteration += 1

        seed = range(len(train_data))
        random.shuffle(seed)
        train_data = [train_data[i] for i in seed]

        score = valid_model(bigram_output, valid_data, valid_score)

        accuary = test_model(bigram_output, test_data, test_score)

        print "iteration: {0}   valid_score: {1}   test_score: {2}".format(iteration, score[0], accuary[0])

        for minibatch_index in range(n_train_batches):

            train_data_batch = train_data[minibatch_index * batch_size : (minibatch_index + 1) * batch_size]
            train_data_batch_x1 = [i[0][0] for i in train_data_batch]
            train_data_batch_x2 = [i[0][1] for i in train_data_batch]
            train_data_batch_y1 = [i[1][0] for i in train_data_batch]
            train_data_batch_y2 = [i[1][1] for i in train_data_batch]
            train_model(train_data_batch_x1, train_data_batch_x2, train_data_batch_y1, train_data_batch_y2)
Developer: wangshaonan, Project: Phrase-representation, Lines: 51, Source: phrase_recnn_mse.py


Example 13: propup

    def propup(self, data, layer=0, static=False):
        """
        propagate the activity through layer 0 to the hidden layer and return
        an array of [2, samples, dimensions]
        where the first 2 dimensions are
        [pre_sigmoid_activation, T.nnet.sigmoid(pre_sigmoid_activation)]
        so far only works for the first rbm layer
        """
        if not isinstance(data, theano.tensor.sharedvar.TensorSharedVariable):
            data = theano.shared(data)

        # allocate symbolic variables for the data
        index = T.lvector()  # index to a [mini]batch
        index_hist = T.lvector()  # index to history
        rbm = self.rbm_layers[layer]

        #################################
        #     Training the CRBM         #
        #################################
        # get the cost and the gradient corresponding to one step of CD-15
        [pre_sig, post_sig] = rbm.propup(static)

        if static:
            # the purpose of train_crbm is solely to update the CRBM parameters
            fn = theano.function([], outputs=[pre_sig, post_sig], givens={self.x: data}, name="propup_tarbm_static")
            return np.array(fn())

        else:
            # indexing is slightly complicated
            # build a linear index to the starting frames for this batch
            # (i.e. time t) gives a batch_size length array for data
            data_idx = np.arange(self.delay, data.get_value(borrow=True).shape[0])

            # now build a linear index to the frames at each delay tap
            # (i.e. time t-1 to t-delay)
            # gives a batch_size x delay array of indices for history
            hist_idx = np.array([data_idx - n for n in xrange(1, self.delay + 1)]).T

            # the purpose of train_crbm is solely to update the CRBM parameters
            fn = theano.function(
                [index, index_hist],
                outputs=[pre_sig, post_sig],
                givens={
                    self.x: data[index],
                    self.x_hist: data[index_hist].reshape((len(data_idx), self.delay * np.prod(self.n_ins))),
                },
                name="train_tarbm",
            )

            return np.array(fn(data_idx, hist_idx.ravel()))
Developer: chausler, Project: deep, Lines: 50, Source: tadbn.py
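A hypothetical call of the method above (the dbn instance and the data array are assumptions): since the result stacks the pre- and post-sigmoid activations along the first axis, it unpacks like this:

# assumed: `dbn` is an instance of the class above, `data` a (n_frames, n_visible) array
acts = dbn.propup(data, layer=0, static=True)
pre_sigmoid, post_sigmoid = acts[0], acts[1]  # each of shape (samples, dimensions)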


Example 14: _compile_bp

    def _compile_bp(self):
        '''
        compile backpropagation foreach of the dqns.
        '''
        self.bprop_by_goal = {}
        for (goal, dqn) in self.dqn_by_goal.items():
            states = dqn.states
            action_values = dqn.action_values
            params = dqn.params
            targets = T.vector('target')
            last_actions = T.lvector('action')

            # loss function.
            mse = layers.MSE(action_values[T.arange(action_values.shape[0]),
                                last_actions], targets)
            # l2 penalty.
            l2_penalty = 0.
            for param in params:
                l2_penalty += (param ** 2).sum()

            cost = mse + self.l2_reg * l2_penalty

            # back propagation.
            updates = optimizers.Adam(cost, params, alpha=self.lr)

            td_errors = T.sqrt(mse)
            self.bprop_by_goal[goal] = theano.function(inputs=[states, last_actions, targets],
                                        outputs=td_errors, updates=updates)
Developer: amoliu, Project: curriculum-deep-RL, Lines: 28, Source: uvfa.py
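The core lvector idiom here is action_values[T.arange(action_values.shape[0]), last_actions]: an int64 vector selects one Q-value per row (one action per state). A standalone sketch of that indexing pattern, not taken from the project:

import numpy
import theano
import theano.tensor as T

# Select one entry per row of a matrix, using an lvector of column indices.
values = T.matrix('values')   # shape (n_rows, n_cols)
picks = T.lvector('picks')    # one int64 column index per row
selected = values[T.arange(values.shape[0]), picks]

f = theano.function([values, picks], selected)
print(f(numpy.arange(12.).reshape(3, 4), numpy.array([0, 2, 3])))
# [  0.   6.  11.]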


Example 15: GRU_question

    def GRU_question(self, dimension_fact_embedding, num_hidden_units_questions, num_hidden_units_episodes, max_question_len, dimension_word_embeddings):

        self.question_idxs = T.lmatrix("question_indices") # as many columns as words in the context window and as many lines as words in the sentence
        self.question_mask = T.lvector("question_mask")
        q = self.emb[self.question_idxs].reshape((self.question_idxs.shape[0], dimension_word_embeddings)) # q holds the embeddings of the words in the current question, reshaped to (n_words, dimension_word_embeddings)

        def slice_w(x, n):
            return x[n*num_hidden_units_questions:(n+1)*num_hidden_units_questions]

        def question_gru_recursion(x_cur, h_prev, q_mask):

            W_in_stacked = T.concatenate([self.W_question_reset_gate_x, self.W_question_update_gate_x, self.W_question_hidden_gate_x], axis=1)
            W_hid_stacked = T.concatenate([self.W_question_reset_gate_h, self.W_question_update_gate_h, self.W_question_hidden_gate_h], axis=1)

            input_n = T.dot(x_cur, W_in_stacked)
            hid_input = T.dot(h_prev, W_hid_stacked)
            resetgate = slice_w(hid_input, 0) + slice_w(input_n, 0)
            updategate = slice_w(hid_input, 1) + slice_w(input_n, 1)
            resetgate = T.tanh(resetgate)
            updategate = T.tanh(updategate)

            hidden_update = slice_w(input_n, 2) + resetgate * slice_w(hid_input, 2)
            hidden_update = T.tanh(hidden_update)
            h_cur = (1 - updategate) * h_prev + updategate * hidden_update  # blend previous state and candidate; the original repeated hidden_update, which cancels the gate

            h_cur = q_mask * h_cur + (1 - q_mask) * h_prev
            # h_cur = T.tanh(T.dot(self.W_fact_to_hidden, x_cur) + T.dot(self.W_hidden_to_hidden, h_prev))
            return h_cur

        state = self.h0_questions
        for jdx in range(max_question_len):
            state = question_gru_recursion(q[jdx], state, self.question_mask[jdx])

        return T.tanh(T.dot(state, self.W_question_to_vector) + self.b_question_to_vector)
Developer: danstrawser, Project: Nlp2Commands, Lines: 34, Source: DMN_SimplerGate.py


Example 16: test_binomial_vector

    def test_binomial_vector(self):
        rng_R = random_state_type()
        n = tensor.lvector()
        prob = tensor.vector()
        post_r, out = binomial(rng_R, n=n, p=prob)
        assert out.ndim == 1
        f = compile.function([rng_R, n, prob], [post_r, out],
                             accept_inplace=True)

        n_val = [1, 2, 3]
        prob_val = numpy.asarray([.1, .2, .3], dtype=config.floatX)
        rng = numpy.random.RandomState(utt.fetch_seed())
        numpy_rng = numpy.random.RandomState(utt.fetch_seed())

        # Arguments of size (3,)
        rng0, val0 = f(rng, n_val, prob_val)
        numpy_val0 = numpy_rng.binomial(n=n_val, p=prob_val)
        assert numpy.all(val0 == numpy_val0)

        # arguments of size (2,)
        rng1, val1 = f(rng0, n_val[:-1], prob_val[:-1])
        numpy_val1 = numpy_rng.binomial(n=n_val[:-1], p=prob_val[:-1])
        assert numpy.all(val1 == numpy_val1)

        # Specifying the size explicitly
        g = compile.function([rng_R, n, prob],
                binomial(rng_R, n=n, p=prob, size=(3,)),
                accept_inplace=True)
        rng2, val2 = g(rng1, n_val, prob_val)
        numpy_val2 = numpy_rng.binomial(n=n_val, p=prob_val, size=(3,))
        assert numpy.all(val2 == numpy_val2)
        self.assertRaises(ValueError, g, rng2, n_val[:-1], prob_val[:-1])
Developer: SamuelZeng, Project: Theano, Lines: 32, Source: test_raw_random.py


Example 17: test_softmax_optimizations_w_bias_vector

    def test_softmax_optimizations_w_bias_vector(self):
        x = tensor.vector('x')
        b = tensor.vector('b')
        one_of_n = tensor.lvector('one_of_n')
        op = crossentropy_categorical_1hot
        fgraph = gof.FunctionGraph(
                [x, b, one_of_n],
                [op(softmax(x + b), one_of_n)])
        assert fgraph.outputs[0].owner.op == op
        #print 'BEFORE'
        #for node in fgraph.toposort():
        #    print node.op
        #print printing.pprint(node.outputs[0])
        #print '----'

        theano.compile.mode.optdb.query(
                theano.compile.mode.OPT_FAST_RUN).optimize(fgraph)
        #print 'AFTER'
        #for node in fgraph.toposort():
        #    print node.op
        #print '===='
        assert len(fgraph.toposort()) == 3
        assert str(fgraph.outputs[0].owner.op) == 'OutputGuard'
        assert (fgraph.outputs[0].owner.inputs[0].owner.op ==
                crossentropy_softmax_argmax_1hot_with_bias)
Developer: repos-python, Project: Theano, Lines: 25, Source: test_nnet.py


Example 18: train_dA

def train_dA(lr=0.1, training_epochs=15, params_dict = False, print_every = 100,
            data=None):

    x = T.lvector('x')
    input_size = T.scalar(dtype='int64')
    dA = make_dA(params=params_dict, input_size=input_size, data=x)
    cost, updates, output = dA.get_cost_updates(lr=lr)

    model = theano.function(
        [x],
        [cost, output],
        updates=updates,
        givens={input_size: x.shape[0]}
    )

    start_time = time.clock()
    for epoch in xrange(training_epochs):
        cost_history = []
        for index in range(len(data)):
            cost, predict= model(data[index])
            cost_history.append(cost)
            if index % print_every == 0:
                print 'Iteration %d, cost %f' % (index, cost)
                print predict
        print 'Training epoch %d, cost ' % epoch, numpy.mean(cost_history)

    training_time = (time.clock() - start_time)

    print 'Finished training %d epochs, took %d seconds' % (training_epochs, training_time)

    return cost_history, dA.get_params(), model
Developer: sqxiang, Project: RNN-LSTM, Lines: 31, Source: rnn_encoder_decoder.py


Example 19: __init__

    def __init__(self, config=None, defaults=defaults, inputs_hook=None, hiddens_hook=None, params_hook=None,
                 use_data_layer=None, rand_crop=None, batch_size=None):
        # combine everything by passing to Model's init
        super(AlexNet, self).__init__(**{arg: val for (arg, val) in locals().iteritems() if arg != 'self'})
        # configs can now be accessed through self dictionary

        if self.inputs_hook or self.hiddens_hook or self.params_hook:
            log.error("Inputs_hook, hiddens_hook, and params_hook not implemented yet for AlexNet!")

        self.flag_datalayer = self.use_data_layer

        ####################
        # Theano variables #
        ####################
        # allocate symbolic variables for the data
        # 'rand' is a random array used for random cropping/mirroring of data
        self.x = T.ftensor4('x')
        self.y = T.lvector('y')
        self.rand = T.fvector('rand')

        ##########
        # params #
        ##########
        self.params = []

        # make the network!
        self.build_computation_graph()
Developer: chagge, Project: OpenDeep, Lines: 27, Source: convolutional_network.py


Example 20: test_optimize_xent_vector2

    def test_optimize_xent_vector2(self):
        verbose = 0
        mode = theano.compile.mode.get_default_mode()
        if mode == theano.compile.mode.get_mode('FAST_COMPILE'):
            mode = 'FAST_RUN'
        rng = numpy.random.RandomState(utt.fetch_seed())
        x_val = rng.randn(5)
        b_val = rng.randn(5)
        y_val = numpy.asarray([2])

        x = T.dvector('x')
        b = T.dvector('b')
        y = T.lvector('y')

        def print_graph(func):
            for i, node in enumerate(func.maker.fgraph.toposort()):
                print i, node
            # Last node should be the output
            print i, printing.pprint(node.outputs[0])
            print

        ## Test that a biased softmax is optimized correctly
        bias_expressions = [
                T.sum(-T.log(softmax(x + b)[T.arange(y.shape[0]), y])),
                -T.sum(T.log(softmax(b + x)[T.arange(y.shape[0]), y])),
                -T.sum(T.log(softmax(x + b))[T.arange(y.shape[0]), y]),
                T.sum(-T.log(softmax(b + x))[T.arange(y.shape[0]), y])]

        for expr in bias_expressions:
            f = theano.function([x, b, y], expr, mode=mode)
            if verbose:
                print_graph(f)
            try:
                prev, last = f.maker.fgraph.toposort()[-2:]
                assert len(f.maker.fgraph.toposort()) == 3
                # [big_op, sum, dim_shuffle]
                f(x_val, b_val, y_val)
            except Exception:
                theano.printing.debugprint(f)
                raise

            backup = config.warn.sum_div_dimshuffle_bug
            config.warn.sum_div_dimshuffle_bug = False
            try:
                g = theano.function([x, b, y], T.grad(expr, x), mode=mode)
            finally:
                config.warn.sum_div_dimshuffle_bug = backup

            if verbose:
                print_graph(g)
            try:
                ops = [node.op for node in g.maker.fgraph.toposort()]
                assert len(ops) <= 6
                assert crossentropy_softmax_1hot_with_bias_dx in ops
                assert softmax_with_bias in ops
                assert softmax_grad not in ops
                g(x_val, b_val, y_val)
            except Exception:
                theano.printing.debugprint(g)
                raise
Developer: srifai, Project: Theano, Lines: 60, Source: test_nnet.py



Note: The theano.tensor.lvector examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by various developers; copyright in the source code remains with the original authors, and redistribution and use are subject to the corresponding projects' licenses. Do not reproduce without permission.

