
Python tensor.lscalar Function Code Examples


This article collects typical usage examples of the Python function theano.tensor.lscalar. If you are wondering what lscalar does, how to call it, or what real code using it looks like, the curated examples below should help.



Twenty code examples of the lscalar function are shown below, sorted by popularity by default.
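
Before the examples, here is a minimal, self-contained sketch of the pattern nearly all of them share: T.lscalar creates a symbolic 64-bit integer scalar, most often used as a minibatch index that selects a slice of a shared dataset through a compiled function's givens substitution. All names in this sketch are illustrative and not taken from the examples below.

    import numpy as np
    import theano
    import theano.tensor as T

    # A symbolic 64-bit integer scalar, used here as a minibatch index.
    index = T.lscalar('index')

    # Keep the dataset in a shared variable so it stays on the compute device.
    data = theano.shared(np.arange(20, dtype=theano.config.floatX).reshape(10, 2))

    batch_size = 2
    x = T.matrix('x')

    # `givens` substitutes an index-dependent slice of the shared data for x.
    batch_mean = theano.function(
        inputs=[index],
        outputs=x.mean(),
        givens={x: data[index * batch_size:(index + 1) * batch_size]},
    )

    print(batch_mean(0))  # mean of rows 0-1
    print(batch_mean(4))  # mean of rows 8-9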

Example 1: build

    def build(self):
        
        # input and output variables
        x = T.matrix('x')
        y = T.matrix('y')
        index = T.lscalar() 
        batch_count = T.lscalar() 
        LR = T.scalar('LR', dtype=theano.config.floatX)
        M = T.scalar('M', dtype=theano.config.floatX)

        # before the build, you work with symbolic variables
        # after the build, you work with numeric variables
        
        self.train_batch = theano.function(inputs=[index,LR,M], updates=self.model.updates(x,y,LR,M),givens={ 
                x: self.shared_x[index * self.batch_size:(index + 1) * self.batch_size], 
                y: self.shared_y[index * self.batch_size:(index + 1) * self.batch_size]},
                name = "train_batch", on_unused_input='warn')
        
        self.test_batch = theano.function(inputs=[index],outputs=self.model.errors(x,y),givens={
                x: self.shared_x[index * self.batch_size:(index + 1) * self.batch_size], 
                y: self.shared_y[index * self.batch_size:(index + 1) * self.batch_size]},
                name = "test_batch")
                
        if self.format == "DFXP" :  
            self.update_range = theano.function(inputs=[batch_count],updates=self.model.range_updates(batch_count), name = "update_range")
Author: spideryan | Project: deep-learning-storage | Lines: 25 | Source: trainer.py
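
For context, compiled functions like train_batch and test_batch above are driven from a plain Python loop over minibatch indexes. A hedged sketch of such a loop, where trainer, n_train_samples, num_epochs, LR, and M are all assumptions rather than names from the source project:

    n_batches = n_train_samples // trainer.batch_size
    for epoch in range(num_epochs):
        for i in range(n_batches):
            trainer.train_batch(i, LR, M)  # minibatch index, learning rate, momentum
        mean_error = sum(trainer.test_batch(i) for i in range(n_batches)) / n_batches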


Example 2: getTrainModel

    def getTrainModel(self, data_x, data_y, data_sm):
        self.ngram_start_index = T.lscalar()
        self.ngram_end_index = T.lscalar()
        self.sm_start_index = T.lscalar()
        self.sm_end_index = T.lscalar()
        self.learning_rate = T.scalar()

        # TRAIN_MODEL
        self.train_outputs = [self.cost, self.grad_norm]
        self.train_set_x, self.train_set_y, self.train_set_sm = io_read_ngram.shared_dataset([data_x, data_y, data_sm])
        self.int_train_set_y = T.cast(self.train_set_y, "int32")
        self.train_model = theano.function(
            inputs=[
                self.ngram_start_index,
                self.ngram_end_index,
                self.sm_start_index,
                self.sm_end_index,
                self.learning_rate,
            ],
            outputs=self.train_outputs,
            updates=self.updates,
            givens={
                self.x: self.train_set_x[self.ngram_start_index : self.ngram_end_index],
                self.y: self.int_train_set_y[self.ngram_start_index : self.ngram_end_index],
                self.sm: self.train_set_sm[self.sm_start_index : self.sm_end_index],
                self.lr: self.learning_rate,
            },
        )

        return self.train_model
Author: batman2013 | Project: nnjm-global | Lines: 30 | Source: model_global.py


Example 3: compile

    def compile(self, objective, optimizer, constraints=None):
        if not constraints:
            constraints = [lambda x: x for _ in self.params]

        # Dummy variables as placeholder for training data,
        # which need to be shared tensor variables
        self.X_train = shared_vals(np.zeros((2, 2)), name='X_train')
        self.Y_train = shared_vals(np.zeros((2, 2)), name='Y_train')

        batch_ix = T.lscalar('ix')
        batch_size = T.lscalar('size')
        y_sym = T.matrix('Y')
        loss = objective(y_sym, self.output)
        updates = optimizer.get_updates(self.params, constraints, loss)
        self.train = theano.function(
            inputs=[batch_ix, batch_size],
            outputs=loss,
            updates=updates,
            givens={
                self.X: self.X_train[batch_ix * batch_size: (batch_ix + 1) * batch_size],
                y_sym : self.Y_train[batch_ix * batch_size: (batch_ix + 1) * batch_size]
            }
        )

        self._predict = theano.function(
            inputs=[self.X],
            outputs=self.output
        )
Author: bitwise-ben | Project: lego | Lines: 28 | Source: lego.py


Example 4: trainer

def trainer(X,Y,alpha,lr,predictions,updates,data,labels):
	data   = U.create_shared(data,  dtype=np.int8)
	labels = U.create_shared(labels,dtype=np.int8)
	index_start = T.lscalar('start')
	index_end   = T.lscalar('end')
	print "Compiling function..."
	train_model = theano.function(
			inputs  = [index_start,index_end,alpha,lr],
			outputs = T.mean(T.neq(T.argmax(predictions, axis=1), Y)),
			updates = updates,
			givens  = {
				X:   data[index_start:index_end],
				Y: labels[index_start:index_end]
			}
		)
	test_model = theano.function(
			inputs  = [index_start,index_end],
			outputs = T.mean(T.neq(T.argmax(predictions, axis=1), Y)),
			givens  = {
				X:   data[index_start:index_end],
				Y: labels[index_start:index_end]
			}
		)
	print "Done."
	return train_model,test_model
Author: Niyikiza | Project: rnn-experiment | Lines: 25 | Source: genchar_mult.py


Example 5: fiting_variables

    def fiting_variables(self, batch_size, train_set_x, train_set_y, test_set_x=None):
        """Sets useful variables for locating batches"""    
        self.index = T.lscalar('index')    # index to a [mini]batch
        self.n_ex = T.lscalar('n_ex')      # total number of examples

        assert type(batch_size) in (IntType, FloatType), "Batch size must be an integer."
        if type(batch_size) is FloatType:
            warnings.warn('Provided batch_size is FloatType, value has been truncated')
            batch_size = int(batch_size)
        # Proper implementation of variable-batch size evaluation
        # Note that the last batch may be a smaller size
        # So we keep around the effective_batch_size (whose last element may
        # be smaller than the rest)
        # And weight the reported error by the batch_size when we average
        # Also, by keeping batch_start and batch_stop as symbolic variables,
        # we make the theano function easier to read
        self.batch_start = self.index * batch_size
        self.batch_stop = T.minimum(self.n_ex, (self.index + 1) * batch_size)
        self.effective_batch_size = self.batch_stop - self.batch_start

        self.get_batch_size = theano.function(inputs=[self.index, self.n_ex],
                                          outputs=self.effective_batch_size)

        # compute number of minibatches for training
        # note that cases are the second dimension, not the first
        self.n_train = train_set_x.get_value(borrow=True).shape[0]
        self.n_train_batches = int(np.ceil(1.0 * self.n_train / batch_size))
        if test_set_x is not None:
            self.n_test = test_set_x.get_value(borrow=True).shape[0]
            self.n_test_batches = int(np.ceil(1.0 * self.n_test / batch_size))
Author: pabaldonedo | Project: stochastic_fnn | Lines: 30 | Source: lbn.py
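
As a quick plain-Python check of the effective-batch-size logic above (the values n_ex = 105 and batch_size = 20 are assumed for illustration):

    n_ex, batch_size = 105, 20
    for index in range(6):
        batch_start = index * batch_size
        batch_stop = min(n_ex, (index + 1) * batch_size)
        print(index, batch_stop - batch_start)  # 20 for indexes 0-4, then 5 for the last batch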


Example 6: __compileFunctions

    def __compileFunctions(self):

        self.__logger.info("Compiling computational graph:")

        index = T.lscalar('index')

        miniBatchSize = T.lscalar('miniBatchSize')


        self.__logger.info(" - Setting up and compiling outputs")
        self.__setUpOutputs(self.input)

        self.__logger.info(" - Setting up and compiling cost functions")
        self.__setUpCostFunctions(self.input,
                                  self.output,
                                  self.supCostWeight,
                                  self.unsupCostWeight)

        self.__logger.info(" - Setting up and compiling optimizers")
        self.__setUpOptimizers(index,
                               miniBatchSize,
                               self.input,
                               self.output,
                               self.epsilon,
                               self.decay,
                               self.momentum)

        self.__setUpHelpers(index,miniBatchSize)
Author: terkkila | Project: cgml | Lines: 28 | Source: computational_graph.py


Example 7: pretraining_functions

 def pretraining_functions(self, train_set_x, train_set_y, batch_size):
     index = tensor.lscalar('index')  
     corruption_level = tensor.scalar('corruption')  
     learning_rate = tensor.scalar('lr')  
     switch = tensor.iscalar('switch')
     n_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
     batch_begin = index * batch_size
     batch_end = batch_begin + batch_size
     pretrain_fns = []
     for sugar in self.sugar_layers:
         cost, updates = sugar.get_cost_updates(corruption_level,
                                             learning_rate,
                                             switch)
         fn = function(inputs=[index,
                                      Param(corruption_level, default=0.2),
                                      Param(learning_rate, default=0.1),
                                      Param(switch, default=1)],
                              outputs=[cost],
                              updates=updates,
                              givens={self.x: train_set_x[batch_begin:batch_end],
                                      self.y: train_set_y[batch_begin:batch_end]}, on_unused_input='ignore')
         pretrain_fns.append(fn)
     return pretrain_fns
Author: lucktroy | Project: sugar | Lines: 26 | Source: deepSUGAR.py
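
A note on the Param defaults above: each compiled pretraining function can be called with just the minibatch index, in which case the corruption level, learning rate, and switch fall back to their declared defaults. A hedged usage sketch, where model and the argument values are assumptions:

    fns = model.pretraining_functions(train_set_x, train_set_y, batch_size=20)
    cost, = fns[0](3)                # index only: corruption=0.2, lr=0.1, switch=1
    cost, = fns[0](3, 0.4, 0.05, 0)  # positional overrides for all three defaults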


Example 8: __init__

 def __init__( self, da, stop_val, corruption, rate, train_path, test_path ):
     self.fid = open( 'output.txt', 'r+' )
     self.model = da
     self.stop_val = stop_val
     self.last_cost = 9999
     self.train_path = train_path
     self.test_path = test_path
     
     self.train_set = numpy.load( train_path )
     self.test_set = numpy.load( test_path )
     
     self.shared_train = theano.shared( self.train_set )
     self.shared_test = theano.shared( self.test_set )
     self.print_set( self.shared_train, "train_set" )
     self.print_set( self.shared_test, "test_set" )
     
     self.learning_rate = rate
     self.corruption_level = corruption
     
     self.start_index = T.lscalar()
     self.end_index = T.lscalar()
     self.cost, self.updates = da.get_cost_updates( corruption, rate )
     
     self.train = theano.function( [ self.start_index, self.end_index ], self.cost, updates = self.updates,
         givens = { da.x : self.shared_train [ self.start_index : self.end_index ] } )
     self.test = theano.function( [ self.start_index, self.end_index ], self.cost, updates = self.updates,
         givens = { da.x : self.shared_test [ self.start_index : self.end_index ] } )
Author: MrWolvwxyz | Project: sparseAutoEncoder | Lines: 27 | Source: theano_class.py


Example 9: train_rnn

def train_rnn():
    rng = numpy.random.RandomState(1234)

    q = T.lvector("q")
    pos = T.lscalar("pos")
    neg = T.lscalar("neg")
    inputs = [q, pos, neg]

    embLayer = emb_layer(None, 100, 5)
    rnn = rnn_layer(input=inputs, emb_layer=embLayer, nh=5)

    cost = rnn.loss()
    gradient = T.grad(cost, rnn.params)
    lr = 0.001
    updates = OrderedDict((p, p - lr * g) for p, g in zip(rnn.params, gradient))
    train = theano.function(inputs=[q, pos, neg], outputs=cost, updates=updates)

    print(rnn.emb.eval()[0])
    e0 = rnn.emb.eval()

    for i in range(0, 3):
        idq = rng.randint(size=10, low=0, high=100)
        idpos = rng.random_integers(100)
        idneg = rng.random_integers(100)

        train(idq, idpos, idneg)
        rnn.normalize()

        print(rnn.emb.eval() - e0)
Author: yzyz7 | Project: attgit | Lines: 29 | Source: train.py


Example 10: test_doc

    def test_doc(self):
        """Ensure the code given in pfunc.txt works as expected"""

        # Example #1.
        a = lscalar()
        b = shared(1)
        f1 = pfunc([a], (a + b))
        f2 = pfunc([Param(a, default=44)], a + b, updates={b: b + 1})
        self.assertTrue(b.get_value() == 1)
        self.assertTrue(f1(3) == 4)
        self.assertTrue(f2(3) == 4)
        self.assertTrue(b.get_value() == 2)
        self.assertTrue(f1(3) == 5)
        b.set_value(0)
        self.assertTrue(f1(3) == 3)

        # Example #2.
        a = tensor.lscalar()
        b = shared(7)
        f1 = pfunc([a], a + b)
        f2 = pfunc([a], a * b)
        self.assertTrue(f1(5) == 12)
        b.set_value(8)
        self.assertTrue(f1(5) == 13)
        self.assertTrue(f2(4) == 32)
Author: gwtaylor | Project: Theano | Lines: 25 | Source: test_pfunc.py


Example 11: train_model

    def train_model(self, X_train, Y_train, X_valid, Y_valid,
                    num_epochs=3000, learning_rate=0.001, batch_size=20,
                    L1_reg=0., L2_reg=0.):

        logging.info('... training model (learning_rate: %f)' % learning_rate)

        cost = self.NLL + L1_reg*self.L1 + L2_reg*self.L2_sqr

        grads = T.grad(cost=cost, wrt=self.params)
        updates = [[param, param - learning_rate*grad]
                   for param, grad in zip(self.params, grads)]

        start = T.lscalar()
        end = T.lscalar()

        train = theano.function(
            inputs=[start, end],
            outputs=cost,
            updates=updates,
            givens={
                self.X: X_train[start:end],
                self.Y: Y_train[start:end]
            }
        )

        validate = theano.function(
            inputs=[start, end],
            outputs=[cost, self.py_x],
            givens={
                self.X: X_valid[start:end],
                self.Y: Y_valid[start:end]
            }
        )

        m_train = X_train.get_value(borrow=True).shape[0]
        m_valid = X_valid.get_value(borrow=True).shape[0]

        stopping_criteria = StoppingCriteria()
        index = range(0, m_train+1, batch_size)

        y_valid = np.argmax(Y_valid.get_value(borrow=True), axis=1)
        for i in range(num_epochs):
            costs = [train(index[j], index[j+1]) for j in range(len(index)-1)]
            E_tr = np.mean(costs)

            E_va, py_x = validate(0, m_valid)
            y_pred = np.argmax(py_x, axis=1)
            A_valid = AccuracyTable(y_pred, y_valid)

            stopping_criteria.append(E_tr, E_va)
            logging.debug('epoch %3d/%d. Cost: %f  Validation: Q3=%.2f%% C3=%f'
                          '(%.2f %.2f %.2f)',
                          i+1, num_epochs, E_tr, A_valid.Q3, A_valid.C3,
                          A_valid.Ch, A_valid.Ce, A_valid.Cc)

            if stopping_criteria.PQ(1):
                logging.debug('Early Stopping!')
                break

        return stopping_criteria
Author: junshuai | Project: PSSPred | Lines: 60 | Source: psspred.py


Example 12: test_argsort

def test_argsort():
    # Set up
    rng = np.random.RandomState(seed=utt.fetch_seed())
    m_val = rng.rand(3, 2)
    v_val = rng.rand(4)

    # Example 1
    a = tensor.dmatrix()
    w = argsort(a)
    f = theano.function([a], w)
    gv = f(m_val)
    gt = np.argsort(m_val)
    assert np.allclose(gv, gt)

    # Example 2
    a = tensor.dmatrix()
    axis = tensor.lscalar()
    w = argsort(a, axis)
    f = theano.function([a, axis], w)
    for axis_val in 0, 1:
        gv = f(m_val, axis_val)
        gt = np.argsort(m_val, axis_val)
        assert np.allclose(gv, gt)

    # Example 3
    a = tensor.dvector()
    w2 = argsort(a)
    f = theano.function([a], w2)
    gv = f(v_val)
    gt = np.argsort(v_val)
    assert np.allclose(gv, gt)

    # Example 4
    a = tensor.dmatrix()
    axis = tensor.lscalar()
    l = argsort(a, axis, "mergesort")
    f = theano.function([a, axis], l)
    for axis_val in 0, 1:
        gv = f(m_val, axis_val)
        gt = np.argsort(m_val, axis_val)
        assert np.allclose(gv, gt)

    # Example 5
    a = tensor.dmatrix()
    axis = tensor.lscalar()
    a1 = ArgSortOp("mergesort", [])
    a2 = ArgSortOp("quicksort", [])
    # All the below should give true
    assert a1 != a2
    assert a1 == ArgSortOp("mergesort", [])
    assert a2 == ArgSortOp("quicksort", [])

    # Example 6: Testing axis=None
    a = tensor.dmatrix()
    w2 = argsort(a, None)
    f = theano.function([a], w2)
    gv = f(m_val)
    gt = np.argsort(m_val, None)
    assert np.allclose(gv, gt)
Author: 12190143 | Project: Theano | Lines: 59 | Source: test_sort.py


Example 13: predict

 def predict(self, X):
     start = T.lscalar()
     end = T.lscalar()
     return theano.function(
         inputs=[start, end],
         outputs=self.py_x,
         givens={self.X: X[start:end]}
     )
Author: junshuai | Project: PSSPred | Lines: 8 | Source: psspred.py
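
Note that predict returns the compiled Theano function rather than predictions, so a call site compiles once and then evaluates row ranges repeatedly. A hedged sketch, where model and shared_X are assumptions:

    predict_fn = model.predict(shared_X)  # compile once against the shared data
    py_x = predict_fn(0, 128)             # class probabilities for rows 0..127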


Example 14: pretraining_functions

    def pretraining_functions(self, train_set_x, train_set_y, alpha, batch_size):
        ''' Generates a list of functions, each of them implementing one
        component (sub-CNN) in training the iCNN.
        The function will require as input the minibatch index, and to train
        a sub-CNN you just need to iterate, calling the corresponding function on
        all minibatch indexes.

        :type train_set_x: theano.tensor.TensorType
        :param train_set_x: Shared variable that contains all datapoints used
                            for training the sub-CNN

        : train_set_y: ...

        :type batch_size: int
        :param batch_size: size of a [mini]batch

        '''

        index = T.lscalar('index')  # index to a minibatch
        learning_rate = T.scalar('learning_rate')  # learning rate to use
        # number of batches
        #n_batches = int(math.ceil(train_set_x.get_value(borrow=True).shape[0] / batch_size))
        # beginning of a batch, given `index`
        batch_begin = index * batch_size
        # ending of a batch given `index`
        batch_end = batch_begin + batch_size

        pretrain_fns = []
        for subcnn in self.subcnns:
            # create a function to compute the mistakes that are made by the model
            index = T.lscalar('index')  # index to a [mini]batch
            #batch_size_var = T.lscalar('batch_size_var')  # batch_size
            # compute the gradients with respect to the model parameters
            grads = T.grad(subcnn.cost, subcnn.params_pretrain)
        
            # add momentum
            # initialize the delta_i-1
            delta_before=[]
            for param_i in subcnn.params:
                delta_before_i=theano.shared(value=numpy.zeros(param_i.get_value().shape))
                delta_before.append(delta_before_i)
        
            updates = []
            for param_i, grad_i, delta_before_i in zip(subcnn.params, grads, delta_before):
                delta_i=-learning_rate * grad_i + alpha*delta_before_i
                updates.append((param_i, param_i + delta_i ))
                updates.append((delta_before_i,delta_i))
            # compile the theano function
            fn = theano.function([index,theano.Param(learning_rate, default=0.1)], [subcnn.cost,subcnn.errors], updates=updates,
                                      givens={
                                      self.x: train_set_x[index*batch_size:(index+1)*batch_size],
                                      self.y: train_set_y[index*batch_size:(index+1)*batch_size]})
            
            # append `fn` to the list of functions
            pretrain_fns.append(fn)

        return pretrain_fns
Author: yifeng-li | Project: DECRES | Lines: 57 | Source: icnn_dfs.py


Example 15: test

def test(model):
    dim = 128
    v_size = 7810
    margin = 1.0
    
    #load model
    f = open(model, 'rb')
    input_params = cPickle.load(f)
    emb, wx, wh, bh, wa = input_params
    f.close()
    
    embLayer = emb_layer(pre_train=emb, v = v_size, dim = dim) 
    rnnLayer = rnn_layer(input=None, wx=wx, wh=wh, bh=bh, emb_layer = embLayer, nh = dim) 
    att = attention_layer(input=None, rnn_layer=rnnLayer, margin = margin)

    q = T.lvector('q')
    a = T.lscalar('a')
    p = T.lvector('p')
    t = T.lscalar('t')
    inputs = [q,a,p,t]
    score = att.predict(inputs)
    pred = theano.function(inputs=inputs,outputs=score)

    pool = ThreadPool()

    f = open('./data/test-small.id','r')
    count = 1
    print('time_b:%s' % time.clock())
    to_pred = []
    for line in f:
        if count % 10000 == 0:
            print(count / 10000)
        count += 1
        #print 'time_b:%s' %time.clock()  
        line = line[:-1]
        tmp = line.split('\t')
        in_q = numpy.array(tmp[0].split(' ')).astype(numpy.int) - 1
        in_a = int(tmp[1].split(' ')[2]) - 1
        in_p = numpy.array(tmp[1].split(' ')).astype(numpy.int) - 1
        in_t = int(tmp[2]) - 1
        lis = (in_q, in_a, in_p, in_t)
        to_pred.append(lis)
        #print 'time_load:%s' %time.clock()  
        #print 'time_score:%s' %time.clock()  
    f.close()

    ay = numpy.asarray(to_pred)
    #results = map(pred, list(ay[:,0]), list(ay[:,1]),list(ay[:,2]),list(ay[:,3]))
    results = pool.map(lambda args: pred(*args), to_pred)  # unpack each (q, a, p, t) tuple
    #results = []
    #for p in to_pred:
    #    results.append(att.predict(p,params))
    print('time_e:%s' % time.clock())
    #print results
    pool.close()
    pool.join()
Author: yzyz7 | Project: attgit | Lines: 56 | Source: batch_predict.py


Example 16: classify_lenet5

def classify_lenet5(learning_rate=0.005, n_epochs=8000,
                    image_path='D:/dev/datasets/isbi/train-input/train-input_0000.tif',
                    paramfile='lenet0_membrane_epoch_25100.pkl.gz',
                    nkerns=[20, 50], batch_size=1):

    rng = numpy.random.RandomState(23455)

    # allocate symbolic variables for the data
    index_x = T.lscalar()  # index to a [mini]batch
    index_y = T.lscalar()  # index to a [mini]batch
    x = T.matrix('x')   # the data is presented as rasterized images
    y = T.ivector('y')  # the labels are presented as 1D vector of
                        # [int] labels

    ishape = (28, 28)  # this is the size of MNIST images

    ######################
    # BUILD ACTUAL MODEL #
    ######################
    print('... building the model')

    # Reshape matrix of rasterized images of shape (batch_size,28*28)
    # to a 4D tensor, compatible with our LeNetConvPoolLayer
    layer0_input = x.reshape((batch_size, 1, 28, 28))

    # Construct the first convolutional pooling layer:
    # filtering reduces the image size to (28-5+1,28-5+1)=(24,24)
    # maxpooling reduces this further to (24/2,24/2) = (12,12)
    # 4D output tensor is thus of shape (batch_size,nkerns[0],12,12)
    layer0 = LeNetConvPoolLayer(rng, input=layer0_input,
            image_shape=(batch_size, 1, 28, 28),
            filter_shape=(nkerns[0], 1, 5, 5), poolsize=(2, 2))

    # Construct the second convolutional pooling layer
    # filtering reduces the image size to (12-5+1,12-5+1)=(8,8)
    # maxpooling reduces this further to (8/2,8/2) = (4,4)
    # 4D output tensor is thus of shape (nkerns[0],nkerns[1],4,4)
    layer1 = LeNetConvPoolLayer(rng, input=layer0.output,
            image_shape=(batch_size, nkerns[0], 12, 12),
            filter_shape=(nkerns[1], nkerns[0], 5, 5), poolsize=(2, 2))

    # the TanhLayer being fully-connected, it operates on 2D matrices of
    # shape (batch_size,num_pixels) (i.e matrix of rasterized images).
    # This will generate a matrix of shape (20,32*4*4) = (20,512)
    layer2_input = layer1.output.flatten(2)

    # construct a fully-connected sigmoidal layer
    layer2 = HiddenLayer(rng, input=layer2_input, n_in=nkerns[1] * 4 * 4,
                         n_out=500, activation=T.tanh)

    # classify the values of the fully-connected sigmoidal layer
    layer3 = LogisticRegression(input=layer2.output, n_in=500, n_out=2)

    # the cost we minimize during training is the NLL of the model
    cost = layer3.negative_log_likelihood(y)
Author: Rhoana | Project: membrane_cnn | Lines: 55 | Source: convert_pkl_to_hdf5.py


Example 17: test

def test(model):
    dim = 128
    v_size = 7810
    margin = 1.0
    
    #load model
    f = open(model, 'rb')
    input_params = cPickle.load(f)
    emb, wx, wh, bh, wa = input_params
    f.close()
    
    embLayer = emb_layer(pre_train=emb, v = v_size, dim = dim) 
    rnnLayer = rnn_layer(input=None, wx=wx, wh=wh, bh=bh, emb_layer = embLayer, nh = dim) 
    att = attention_layer(input=None, rnn_layer=rnnLayer, margin = margin)

    q = T.lvector('q')
    a = T.lscalar('a')
    p = T.lvector('p')
    t = T.lscalar('t')
    inputs = [q,a,p,t]

    #emb_num = T.lscalar('emb_num')
    #nh = T.scalar('nh')
    #dim = T.scalar('dim')
    score = att.predict(inputs)
    pred = theano.function(inputs=inputs,outputs=score)

    

    wf = open('./data/res','w')
    f = open('./data/test.id','r')
    count = 1
    print('time_b:%s' % time.clock())
    for line in f:
        if count % 10000 == 0:
            print(count / 10000)
            print('time_1w:%s' % time.clock())
        count += 1
        #print 'time_b:%s' %time.clock()  
        line = line[:-1]
        tmp = line.split('\t')
        in_q = numpy.array(tmp[0].split(' ')).astype(numpy.int) - 1
        #x = emb[q].reshape((q.shape[0], emb.shape[1]))
        in_a = int(tmp[1].split(' ')[2]) - 1
        in_p = numpy.array(tmp[1].split(' ')).astype(numpy.int) - 1
        in_t = int(tmp[2]) - 1
        #in_lis =  [in_q, in_a, in_p, in_t]
        #print 'time_load:%s' %time.clock()  
        s = pred(in_q, in_a, in_p, in_t)
        #print(s)
        wf.write(str(s) + '\n')
        #print 'time_score:%s' %time.clock()  
    f.close()
    wf.close()
Author: yzyz7 | Project: attgit | Lines: 54 | Source: test_att.py


Example 18: apply_net

    def apply_net(self, input_image, perform_downsample=False, perform_pad=False, perform_upsample=False, perform_blur=False, perform_offset=False):

        if perform_pad:
            input_image = np.pad(input_image, ((self.pad_by, self.pad_by), (self.pad_by, self.pad_by)), 'symmetric')

        if perform_downsample and self.downsample != 1:
            input_image = np.float32(mahotas.imresize(input_image, 1.0/self.downsample))

        nx = input_image.shape[0] - self.pad_by*2
        ny = input_image.shape[1] - self.pad_by*2
        nbatches = nx * ny

        output = np.zeros((nx, ny), dtype=np.float32)

        t_input_image = theano.shared(np.asarray(input_image,dtype=theano.config.floatX),borrow=True)

        index_x = T.lscalar()
        index_y = T.lscalar()

        # eval_network_l0 = theano.function([index_x, index_y], self.all_layers[0].output,
        #     givens={self.x: t_input_image[index_x:index_x + self.pad_by * 2 + 1, index_y:index_y + self.pad_by * 2 + 1]})
        # eval_network_l1 = theano.function([index_x, index_y], self.all_layers[1].output,
        #     givens={self.x: t_input_image[index_x:index_x + self.pad_by * 2 + 1, index_y:index_y + self.pad_by * 2 + 1]})
        # eval_network_l2 = theano.function([index_x, index_y], self.all_layers[2].output,
        #     givens={self.x: t_input_image[index_x:index_x + self.pad_by * 2 + 1, index_y:index_y + self.pad_by * 2 + 1]})
        eval_network = theano.function([index_x, index_y], self.all_layers[-1].output,
            givens={self.x: t_input_image[index_x:index_x + self.pad_by * 2 + 1, index_y:index_y + self.pad_by * 2 + 1]})

        for xi in range(nx):
            for yi in range(ny):
                # print eval_network_l0(xi, yi)[0,0,:,:]
                # print eval_network_l1(xi, yi)[0,0,:,:]
                # print eval_network_l2(xi, yi)[0,0,:,:]
                # print eval_network(xi, yi)[0,0]
                output[xi, yi] = eval_network(xi, yi)[0,0]
            print "up to x={0} of {1}".format(xi+1, nx)


        if perform_upsample:
            output = np.float32(mahotas.imresize(output, self.downsample))

        if perform_blur and self.best_sigma != 0:
            output = scipy.ndimage.filters.gaussian_filter(output, self.best_sigma)

        if perform_offset:
            #Translate
            output = np.roll(output, self.best_offset[0], axis=0)
            output = np.roll(output, self.best_offset[1], axis=1)

        # Crop to valid size
        #output = output[self.pad_by:-self.pad_by,self.pad_by:-self.pad_by]

        return output
Author: Rhoana | Project: membrane_cnn | Lines: 53 | Source: lib_maxout_theano.py


Example 19: cost_function

     def cost_function(self,learning_rate,batch_size):
         index = T.lscalar()
         index1 = T.lscalar()
         
         """ cost function"""
         cost=self.negative_log_likelihood(self.y)
         """ Gradient of cost function"""
         g_W = T.grad(cost=cost, wrt=self.W)
         g_b = T.grad(cost=cost, wrt=self.b)    

         """ Gradient update equations used by gradient descent algorithms"""
         updates = [(self.W, self.W - learning_rate * g_W),(self.b, self.b - learning_rate * g_b)]
    
    
         num_samples = self.train[0].get_value(borrow=True).shape[0]
         print('\n\n********************************')
         print('num of training samples: %d' % num_samples)
         print('num of dimensions: %d' % self.n_in)
         print('num of classes: %d' % self.n_classes)
         print('Training batch size: %d' % batch_size)

         self.n_train_batches = self.train[0].get_value(borrow=True).shape[0] // batch_size
         self.n_valid_batches = self.validate[0].get_value(borrow=True).shape[0] // batch_size
         self.n_test_batches = self.test[0].get_value(borrow=True).shape[0] // batch_size

         self.train[1] = T.cast(self.train[1], 'int32')
         self.test[1] = T.cast(self.test[1], 'int32')
         self.validate[1] = T.cast(self.validate[1], 'int32')

         """ Defining functions for training,testing and validation """
         self.train_model = theano.function(inputs=[index],
            outputs=cost,
            updates=updates,
            givens={
                self.x: self.train[0][index*batch_size:(index + 1)*batch_size],
                self.y: self.train[1][index*batch_size:(index + 1)*batch_size]})
    
         self.test_model = theano.function(inputs=[index1],
            outputs=[self.errors(self.y),self.y_pred],
             givens={
                 self.x: self.test[0][index1*batch_size:(index1 + 1)*batch_size],
                 self.y: self.test[1][index1*batch_size:(index1 + 1)*batch_size]})

    
         self.validate_model = theano.function(inputs=[index],
            outputs=self.errors(self.y),
            givens={
                self.x: self.validate[0][index * batch_size:(index + 1) * batch_size],
                self.y: self.validate[1][index * batch_size:(index + 1) * batch_size]})
Author: biotrump | Project: OpenVision | Lines: 52 | Source: LogisticRegression.py


Example 20: __init__

    def __init__(self, dnodex,inputdim, name=""):
        pos_p=T.lscalar()
        neg_poi=T.lscalar()
        user=T.lscalar()
        eta=T.scalar()
        pfp_loss=T.scalar()
        if dnodex.pmatrix is None:
            dnodex.umatrix=theano.shared(floatX(np.random.randn(*(dnodex.nuser, inputdim))))
            dnodex.pmatrix=theano.shared(floatX(np.random.randn(*(dnodex.npoi,inputdim))))
        n_updates=[(dnodex.pmatrix, T.set_subtensor(dnodex.pmatrix[neg_poi,:],dnodex.pmatrix[neg_poi,:]-eta*pfp_loss*dnodex.umatrix[user,:]-eta*eta*dnodex.pmatrix[neg_poi,:]))]
        p_updates=[(dnodex.pmatrix, T.set_subtensor(dnodex.pmatrix[pos_p,:],dnodex.pmatrix[pos_p,:]+eta*pfp_loss*dnodex.umatrix[user,:]-eta*eta*dnodex.pmatrix[pos_p,:])),(dnodex.umatrix, T.set_subtensor(dnodex.umatrix[user,:],dnodex.umatrix[user,:]+eta*pfp_loss*(dnodex.pmatrix[pos_p,:]-dnodex.pmatrix[neg_poi,:])-eta*eta*dnodex.umatrix[user,:]))]
        self.trainpos=theano.function([pos_p,neg_poi,user,eta,pfp_loss],updates=p_updates,allow_input_downcast=True)
        self.trainneg=theano.function([neg_poi,user,eta,pfp_loss],updates=n_updates,allow_input_downcast=True)
Author: tonytongzhao | Project: PyRNN | Lines: 13 | Source: lf.py



Note: The theano.tensor.lscalar examples in this article were compiled by 纯净天空 from source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets come from open-source projects and their copyright belongs to the original authors; use and redistribution should follow each project's license. Please do not repost without permission.

