
Python sharedvalue.shared Function Code Examples


This article collects typical usage examples of the Python function theano.compile.sharedvalue.shared. If you are unsure what shared does, how to call it, or how it is used in practice, the curated examples below should help.



The following presents 20 code examples of the shared function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps surface better Python code examples.
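Before the excerpts, here is a minimal sketch of the basic API, with illustrative names (the function is also re-exported as `theano.shared`):

import numpy
import theano
import theano.tensor as tensor
from theano.compile.sharedvalue import shared

# A shared variable wraps a value that persists across compiled
# function calls and is implicitly available to every graph using it.
state = shared(numpy.float64(0.0), name='state')

inc = tensor.dscalar('inc')
# `updates` replaces the shared value after each call; the returned
# output is computed from the value as it was *before* the update.
accumulate = theano.function([inc], state, updates=[(state, state + inc)])

accumulate(1.0)               # returns array(0.0); state becomes 1.0
print(state.get_value())      # 1.0
state.set_value(42.0)         # direct assignment, outside any graph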

Example 1: __init__

    def __init__(self, anchors, K, D, alpha=1.0, beta=1.0, *args, **kwargs):
        self.alpha = shared(alpha)
        self.beta = shared(beta)
        # mask contains zeros for elements fixed at 1E-6
        mask = np.ones((K, D))
        for anchor in anchors:
            for hold in anchor[1]:
                mask[:, hold] = 0
                mask[anchor[0], hold] = 1
        self.mask = TT.as_tensor_variable(mask)

        super(Beta_with_anchors, self).__init__(
            transform=anchored_betas(mask=self.mask, K=K, D=D,
                                     alpha=alpha, beta=beta),
            *args, **kwargs)

        # TODO: Should this be numpy like ratematrix?
        self.mean = TT.ones_like(self.mask) * 1E-6
        self.mean = TT.set_subtensor(self.mean[self.mask.nonzero()],
                                     alpha / (alpha + beta))
Developer: clinicalml, Project: ContinuousTimeMarkovModel, Lines: 26, Source: distributions.py


Example 2: __init__

    def __init__(self,
                 input=tensor.dvector('input'),
                 target=tensor.dvector('target'),
                 n_input=1, n_hidden=1, n_output=1, lr=1e-3, **kw):
        super(NNet, self).__init__(**kw)

        self.input = input
        self.target = target
        self.lr = shared(lr, 'learning_rate')
        self.w1 = shared(numpy.zeros((n_hidden, n_input)), 'w1')
        self.w2 = shared(numpy.zeros((n_output, n_hidden)), 'w2')
        # print self.lr.type

        self.hidden = sigmoid(tensor.dot(self.w1, self.input))
        self.output = tensor.dot(self.w2, self.hidden)
        self.cost = tensor.sum((self.output - self.target)**2)

        self.sgd_updates = {
            self.w1: self.w1 - self.lr * tensor.grad(self.cost, self.w1),
            self.w2: self.w2 - self.lr * tensor.grad(self.cost, self.w2)}

        self.sgd_step = pfunc(
            params=[self.input, self.target],
            outputs=[self.output, self.cost],
            updates=self.sgd_updates)

        self.compute_output = pfunc([self.input], self.output)

        self.output_from_hidden = pfunc([self.hidden], self.output)
Developer: 12190143, Project: Theano, Lines: 29, Source: test_misc.py
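A hypothetical driver for the class above; the argument names come from the constructor, while the data values are made up:

import numpy

net = NNet(n_input=3, n_hidden=4, n_output=2, lr=0.1)
x = numpy.array([0.1, 0.2, 0.3])
t = numpy.array([1.0, 0.0])
for _ in range(10):
    out, cost = net.sgd_step(x, t)   # one SGD step; mutates the shared w1, w2
print(net.compute_output(x))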


Example 3: __init__

    def __init__(self, hyperparameters):
        self.hyperparameters = hyperparameters
        numpy.random.seed()

        self.embeddings = numpy.asarray((numpy.random.rand(self.hyperparameters.vocab_size, self.hyperparameters.embedding_size) - 0.5)* 2 * 0.01, dtype=floatX)
        self.hidden_weights = shared(numpy.asarray(random_weights(self.hyperparameters.input_size, self.hyperparameters.hidden_size, scale_by=1), dtype=floatX))
        self.output_weights = shared(numpy.asarray(random_weights(self.hyperparameters.hidden_size, self.hyperparameters.output_size, scale_by=1), dtype=floatX))
        self.hidden_biases = shared(numpy.asarray(numpy.zeros((self.hyperparameters.hidden_size,)), dtype=floatX))
        self.output_biases = shared(numpy.asarray(numpy.zeros((self.hyperparameters.output_size,)), dtype=floatX))
Developer: sinopeus, Project: thrax, Lines: 9, Source: parameters.py


Example 4: test_strict_generic

    def test_strict_generic(self):

        # this should work, because
        # generic can hold anything even when strict=True

        u = shared('asdf', strict=False)
        v = shared('asdf', strict=True)

        u.set_value(88)
        v.set_value(88)
Developer: 12190143, Project: Theano, Lines: 10, Source: test_shared.py


Example 5: run_nnet

def run_nnet(use_gpu, n_batch=60, n_in=1024, n_hid=2048, n_out=10,
             n_train=100):

    if config.mode == 'DEBUG_MODE':
        n_train = 1

    if use_gpu:
        w = tcn.shared_constructor(0.01 * (my_rand(n_in, n_hid) - 0.5), 'w')
        b = tcn.shared_constructor(my_zeros(n_hid), 'b')
        v = tcn.shared_constructor(my_zeros((n_hid, n_out)), 'v')
        c = tcn.shared_constructor(my_zeros(n_out), 'c')
    else:
        w = shared(0.01 * (my_rand(n_in, n_hid) - 0.5), 'w')
        b = shared(my_zeros(n_hid), 'b')
        v = shared(my_zeros((n_hid, n_out)), 'v')
        c = shared(my_zeros(n_out), 'c')

    x = tensor.fmatrix('x')
    y = tensor.fmatrix('y')
    lr = tensor.fscalar('lr')

    hid = tensor.tanh(tensor.dot(x, w) + b)
    out = tensor.tanh(tensor.dot(hid, v) + c)
    loss = tensor.sum(0.5 * (out - y) ** 2 * lr)
    if 0:
        print('loss type', loss.type)

    params = [w, b, v, c]
    gparams = tensor.grad(loss, params)

    mode = get_mode(use_gpu)

    # print 'building pfunc ...'
    train = pfunc([x, y, lr], [loss], mode=mode,
                  updates=[(p, p - g) for p, g in izip(params, gparams)])

    if 0:
        for i, n in enumerate(train.maker.fgraph.toposort()):
            print(i, n)

    xval = my_rand(n_batch, n_in)
    yval = my_rand(n_batch, n_out)
    lr = theano._asarray(0.01, dtype='float32')

    t0 = time.time()
    rval = []
    for i in xrange(n_train):
        rval.append(train(xval, yval, lr))
    dt = time.time() - t0

    print_mode(mode)
    return numpy.asarray(rval), dt
Developer: 12190143, Project: Theano, Lines: 52, Source: test_mlp.py


Example 6: test_scalar_floatX

    def test_scalar_floatX(self):

        # The test should assure that floatX is not used in the shared
        # constructor for scalars. Shared values can change, and since we
        # don't know the range they might take, we should keep the same
        # bit width / precision as the original value used to create the
        # shared variable.

        # Since downcasting of a value now raises an exception, the shared
        # variables below are created with allow_downcast=True.

        def f(var, val):
            var.set_value(val)

        b = shared(numpy.int64(7), allow_downcast=True)
        assert b.type == theano.tensor.lscalar
        f(b, 8.23)
        assert b.get_value() == 8

        b = shared(numpy.int32(7), allow_downcast=True)
        assert b.type == theano.tensor.iscalar
        f(b, 8.23)
        assert b.get_value() == 8

        b = shared(numpy.int16(7), allow_downcast=True)
        assert b.type == theano.tensor.wscalar
        f(b, 8.23)
        assert b.get_value() == 8

        b = shared(numpy.int8(7), allow_downcast=True)
        assert b.type == theano.tensor.bscalar
        f(b, 8.23)
        assert b.get_value() == 8

        b = shared(numpy.float64(7.234), allow_downcast=True)
        assert b.type == theano.tensor.dscalar
        f(b, 8)
        assert b.get_value() == 8

        b = shared(numpy.float32(7.234), allow_downcast=True)
        assert b.type == theano.tensor.fscalar
        f(b, 8)
        assert b.get_value() == 8

        b = shared(numpy.float(7.234), allow_downcast=True)
        assert b.type == theano.tensor.dscalar
        f(b, 8)
        assert b.get_value() == 8

        b = shared(7.234, allow_downcast=True)
        assert b.type == theano.tensor.dscalar
        f(b, 8)
        assert b.get_value() == 8

        b = shared(numpy.zeros((5, 5), dtype='float32'))
        self.assertRaises(TypeError, f, b, numpy.random.rand(5, 5))
Developer: 12190143, Project: Theano, Lines: 55, Source: test_shared.py
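Examples 4, 6, 13 and 14 together pin down the casting rules. Below is a condensed sketch of the three modes, consistent with the assertions above:

import numpy
from theano.compile.sharedvalue import shared

s = shared(numpy.float32(1.0), strict=True)       # dtype is locked
# s.set_value(2)                # would raise TypeError: not a float32

d = shared(numpy.int16(7), allow_downcast=True)   # lossy casts allowed
d.set_value(8.23)                                 # silently stored as int16(8)

g = shared(numpy.float32(1.0))                    # default: only safe casts
# g.set_value(numpy.float64(2.0))                 # would raise TypeError
g.set_value(numpy.float32(2.0))                   # the exact dtype always works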


Example 7: gen

    def gen(self, op, *args, **kwargs):
        """Create a new random stream in this container.

        :param op: a RandomFunction instance to apply to a new random state

        :param args: interpreted by `op`

        :param kwargs: interpreted by `op`

        :returns: The symbolic random draw part of op()'s return
        value.  This function stores the updated RandomStateType
        Variable for use at `build` time.

        :rtype: TensorVariable

        """
        seed = int(self.gen_seedgen.randint(2 ** 30))
        random_state_variable = shared(numpy.random.RandomState(seed))
        # Add a reference to distinguish from other shared variables
        random_state_variable.tag.is_rng = True
        new_r, out = op(random_state_variable, *args, **kwargs)
        out.rng = random_state_variable
        out.update = (random_state_variable, new_r)
        self.state_updates.append(out.update)
        random_state_variable.default_update = new_r
        return out
Developer: Ambier, Project: Theano, Lines: 26, Source: shared_randomstreams.py
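`gen` is the machinery behind `RandomStreams`: a shared RandomState plus `default_update` is what makes each call of a compiled function draw fresh numbers. A short sketch of that public wrapper:

import theano
from theano.tensor.shared_randomstreams import RandomStreams

srng = RandomStreams(seed=234)
rv_u = srng.uniform((2, 2))    # calls gen() internally, creating a shared RandomState

f = theano.function([], rv_u)
print(f())                     # two calls give different draws, because
print(f())                     # default_update advances the shared RNG state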


Example 8: train

    def train(self, train_set_x, pretraining_epochs=15,
            pretrain_lr=0.001, batch_size=1, n_ins=784,
            hidden_layers_sizes=[500, 500]):
        """
        Train the StackedAutoEncoder.
        """

        if not isinstance(train_set_x, TensorSharedVariable):
            train_set_x = shared(train_set_x)
        n_train_batches = train_set_x.get_value(borrow=True).shape[0]
        n_train_batches /= batch_size

        print "hidden_layers_sizes: ", hidden_layers_sizes

        print "... building the model"
        numpy_rng = numpy.random.RandomState(89677)
        self.sda = AdvancedStackedAutoEncoder(
            numpy_rng=numpy_rng,
            n_ins=n_ins,
            hidden_layers_sizes=hidden_layers_sizes,
        )

        print "... getting the pretraining function"
        pretraining_fns = self.sda.pretraining_functions(train_set_x=train_set_x,
                                                batch_size=batch_size)
        print '... pre-training the model'
        for i in xrange(self.sda.n_layers):
            # go through pretraining epochs
            for epoch in xrange(pretraining_epochs):
                # go through the training set
                c = []
                for batch_index in xrange(n_train_batches):
                    c.append(pretraining_fns[i](index=batch_index,lr=pretrain_lr))
                print 'Pre-training layer %i, epoch %d, cost ' % (i, epoch),
                print numpy.mean(c)
Developer: zixuan-zhang, Project: OpenSV, Lines: 35, Source: auto_encoder.py


Example 9: mv_shared

def mv_shared(*args, **kwargs):
    '''mv_shared works the same as `theano.shared`.

    It calls `theano.shared` to create the SharedVariable and uses
    MVSharedVariable to wrap it.
    '''
    var = shared(*args, **kwargs)
    mv_shared.shared_vars.append(MVSharedVariable(var))
    return var
Developer: ericxsun, Project: multiverso, Lines: 9, Source: sharedvar.py
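The `shared_vars` registry used above must exist before the first call; in the source module it is presumably created once at import time, along these lines:

# Give mv_shared its registry of wrapped shared variables.
mv_shared.shared_vars = []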


Example 10: test_tensor_floatX

    def test_tensor_floatX(self):
        def f(var, val):
            var.set_value(val)

        b = shared(numpy.int64([7]), allow_downcast=True)
        assert b.type == theano.tensor.lvector
        f(b, [8.23])
        assert b.get_value() == 8

        b = shared(numpy.int32([7]), allow_downcast=True)
        assert b.type == theano.tensor.ivector
        f(b, [8.23])
        assert b.get_value() == 8

        b = shared(numpy.int16([7]), allow_downcast=True)
        assert b.type == theano.tensor.wvector
        f(b, [8.23])
        assert b.get_value() == 8

        b = shared(numpy.int8([7]), allow_downcast=True)
        assert b.type == theano.tensor.bvector
        f(b, [8.23])
        assert b.get_value() == 8

        b = shared(numpy.float64([7.234]), allow_downcast=True)
        assert b.type == theano.tensor.dvector
        f(b, [8])
        assert b.get_value() == 8

        b = shared(numpy.float32([7.234]), allow_downcast=True)
        assert b.type == theano.tensor.fvector
        f(b, [8])
        assert b.get_value() == 8

# numpy.float([7.234]) doesn't work
#        b = shared(numpy.float([7.234]))
#        assert b.type == theano.tensor.dvector
#        f(b,[8])

# This generates a generic type. Should we cast? I don't think so.
#        b = shared([7.234])
#        assert b.type == theano.tensor.dvector
#        f(b,[8])

        b = shared(numpy.asarray([7.234], dtype=theano.config.floatX),
                   allow_downcast=True)
        assert b.dtype == theano.config.floatX
        f(b, [8])
        assert b.get_value() == 8

        b = shared(numpy.zeros((5, 5), dtype='float32'))
        self.assertRaises(TypeError, f, b, numpy.random.rand(5, 5))
Developer: 12190143, Project: Theano, Lines: 52, Source: test_shared.py


Example 11: test_ctors

    def test_ctors(self):

        if theano.configdefaults.python_int_bitwidth() == 32:
            assert shared(7).type == theano.tensor.iscalar, shared(7).type
        else:
            assert shared(7).type == theano.tensor.lscalar, shared(7).type
        assert shared(7.0).type == theano.tensor.dscalar
        assert shared(numpy.float32(7)).type == theano.tensor.fscalar

        # test tensor constructor
        b = shared(numpy.zeros((5, 5), dtype='int32'))
        assert b.type == TensorType('int32', broadcastable=[False, False])
        b = shared(numpy.random.rand(4, 5))
        assert b.type == TensorType('float64', broadcastable=[False, False])
        b = shared(numpy.random.rand(5, 1, 2))
        assert b.type == TensorType('float64', broadcastable=[False, False, False])

        assert shared([]).type == generic

        def badfunc():
            shared(7, bad_kw=False)
        self.assertRaises(TypeError, badfunc)
Developer: 12190143, Project: Theano, Lines: 22, Source: test_shared.py


Example 12: __init__

    def __init__(self, window_size, vocab_size, embedding_size, hidden_size, seed, initial_embeddings, two_hidden_layers):
        """
        Initialize L{Model} parameters.
        """

        self.vocab_size     = vocab_size
        self.window_size    = window_size
        self.embedding_size = embedding_size
        self.two_hidden_layers = two_hidden_layers
        if LBL:
            self.hidden_size    = hidden_size
            self.output_size    = self.embedding_size
        else:
            self.hidden_size    = hidden_size
            self.output_size    = 1

        import numpy
        import hyperparameters

        from pylearn.algorithms.weights import random_weights
        numpy.random.seed(seed)
        if initial_embeddings is None:
            self.embeddings = numpy.asarray((numpy.random.rand(self.vocab_size, HYPERPARAMETERS["EMBEDDING_SIZE"]) - 0.5)*2 * HYPERPARAMETERS["INITIAL_EMBEDDING_RANGE"], dtype=floatX)
        else:
            assert initial_embeddings.shape == (self.vocab_size, HYPERPARAMETERS["EMBEDDING_SIZE"])
            self.embeddings = copy.copy(initial_embeddings)
        if HYPERPARAMETERS["NORMALIZE_EMBEDDINGS"]: self.normalize(range(self.vocab_size))
        if LBL:
            self.output_weights = shared(numpy.asarray(random_weights(self.input_size, self.output_size, scale_by=HYPERPARAMETERS["SCALE_INITIAL_WEIGHTS_BY"]), dtype=floatX))
            self.output_biases = shared(numpy.asarray(numpy.zeros((1, self.output_size)), dtype=floatX))
            self.score_biases = shared(numpy.asarray(numpy.zeros(self.vocab_size), dtype=floatX))
            assert not self.two_hidden_layers
        else:
            self.hidden_weights = shared(numpy.asarray(random_weights(self.input_size, self.hidden_size, scale_by=HYPERPARAMETERS["SCALE_INITIAL_WEIGHTS_BY"]), dtype=floatX))
            self.hidden_biases = shared(numpy.asarray(numpy.zeros((self.hidden_size,)), dtype=floatX))
            if self.two_hidden_layers:
                self.hidden2_weights = shared(numpy.asarray(random_weights(self.hidden_size, self.hidden_size, scale_by=HYPERPARAMETERS["SCALE_INITIAL_WEIGHTS_BY"]), dtype=floatX))
                self.hidden2_biases = shared(numpy.asarray(numpy.zeros((self.hidden_size,)), dtype=floatX))
            self.output_weights = shared(numpy.asarray(random_weights(self.hidden_size, self.output_size, scale_by=HYPERPARAMETERS["SCALE_INITIAL_WEIGHTS_BY"]), dtype=floatX))
            self.output_biases = shared(numpy.asarray(numpy.zeros((self.output_size,)), dtype=floatX))
Developer: AcademiaSinicaNLPLab, Project: neural-language-model, Lines: 40, Source: parameters.py


Example 13: test_scalar_strict

    def test_scalar_strict(self):
        def f(var, val):
            var.set_value(val)

        b = shared(numpy.int64(7), strict=True)
        assert b.type == theano.tensor.lscalar
        self.assertRaises(TypeError, f, b, 8.23)

        b = shared(numpy.int32(7), strict=True)
        assert b.type == theano.tensor.iscalar
        self.assertRaises(TypeError, f, b, 8.23)

        b = shared(numpy.int16(7), strict=True)
        assert b.type == theano.tensor.wscalar
        self.assertRaises(TypeError, f, b, 8.23)

        b = shared(numpy.int8(7), strict=True)
        assert b.type == theano.tensor.bscalar
        self.assertRaises(TypeError, f, b, 8.23)

        b = shared(numpy.float64(7.234), strict=True)
        assert b.type == theano.tensor.dscalar
        self.assertRaises(TypeError, f, b, 8)

        b = shared(numpy.float32(7.234), strict=True)
        assert b.type == theano.tensor.fscalar
        self.assertRaises(TypeError, f, b, 8)

        b = shared(numpy.float(7.234), strict=True)
        assert b.type == theano.tensor.dscalar
        self.assertRaises(TypeError, f, b, 8)

        b = shared(7.234, strict=True)
        assert b.type == theano.tensor.dscalar
        self.assertRaises(TypeError, f, b, 8)

        b = shared(numpy.zeros((5, 5), dtype='float32'))
        self.assertRaises(TypeError, f, b, numpy.random.rand(5, 5))
Developer: 12190143, Project: Theano, Lines: 38, Source: test_shared.py


Example 14: test_tensor_strict

    def test_tensor_strict(self):
        def f(var, val):
            var.set_value(val)

        b = shared(numpy.int64([7]), strict=True)
        assert b.type == theano.tensor.lvector
        self.assertRaises(TypeError, f, b, 8.23)

        b = shared(numpy.int32([7]), strict=True)
        assert b.type == theano.tensor.ivector
        self.assertRaises(TypeError, f, b, 8.23)

        b = shared(numpy.int16([7]), strict=True)
        assert b.type == theano.tensor.wvector
        self.assertRaises(TypeError, f, b, 8.23)

        b = shared(numpy.int8([7]), strict=True)
        assert b.type == theano.tensor.bvector
        self.assertRaises(TypeError, f, b, 8.23)

        b = shared(numpy.float64([7.234]), strict=True)
        assert b.type == theano.tensor.dvector
        self.assertRaises(TypeError, f, b, 8)

        b = shared(numpy.float32([7.234]), strict=True)
        assert b.type == theano.tensor.fvector
        self.assertRaises(TypeError, f, b, 8)

# numpy.float([7.234]) doesn't work
#        b = shared(numpy.float([7.234]), strict=True)
#        assert b.type == theano.tensor.dvector
#        self.assertRaises(TypeError, f, b, 8)

# This generates a generic type. Should we cast? I don't think so.
#        b = shared([7.234], strict=True)
#        assert b.type == theano.tensor.dvector
#        self.assertRaises(TypeError, f, b, 8)

        b = shared(numpy.zeros((5, 5), dtype='float32'))
        self.assertRaises(TypeError, f, b, numpy.random.rand(5, 5))
Developer: 12190143, Project: Theano, Lines: 40, Source: test_shared.py


Example 15: test_SdA

def test_SdA():
    """
    test AdvancedStackedAutoEncoder
    """
    # Testing the SdA works like this: to get features,
    # only two calls are needed: train() and get_features().
    train_sets = [
            [1., 1., 1.],
            [2., 2., 2.],
        ]

    train_set_x = numpy.asarray(train_sets)
    train_set_x = shared(train_set_x)
    # test_set = [4.] * (28*28)
    test_set = [4.] * 3
    driver = StackedAutoEncoderDriver()
    driver.train(train_set_x, n_ins=3, hidden_layers_sizes=[2, 1])
    # driver.train_with_mnist()

    params = driver.sda.params
    features = driver.get_features(test_set)
    print features
Developer: zixuan-zhang, Project: OpenSV, Lines: 23, Source: auto_encoder.py


Example 16: rebuild_collect_shared

def rebuild_collect_shared(outputs,
                           inputs=None,
                           replace=None,
                           updates=None,
                           rebuild_strict=True,
                           copy_inputs_over=True,
                           no_default_updates=False,
                           ):
    """
    Function that allows replacing subgraphs of a computational
    graph.

    It returns a set of dictionaries and lists which collect (partial?)
    different information about shared variables. This info is required by
    `pfunc`.


    :type outputs: list of Theano Variables ( or Theano expressions)
    :param outputs: list of Theano variables or expressions representing the
                    outputs of the computational graph

    :type inputs: list of Theano Variables ( or Theano expressions)
    :param inputs: list of Theano variables or expressions representing the
                    inputs of the computational graph (or None)
    :type replace: dict
    :param replace: dictionary describing which subgraphs should be
                    replaced by what. orig_value => new_value

    :type updates: dict
    :param updates: dictionary describing updates expressions for shared
                    variables

    :type rebuild_strict: bool
    :param rebuild_strict: flag, if true the type of all inputs should be
                            the same as for the current node

    :type copy_inputs_over: bool
    :param copy_inputs_over: flag; if False it will clone inputs

    :type no_default_updates: either bool or list of Variables
    :param no_default_updates: if True, do not perform any automatic update
                               on Variables. If False (default), perform
                               them all. Else, perform automatic updates
                               on all Variables that are neither in
                               "updates" nor in "no_default_updates".

    """

    if isinstance(outputs, tuple):
        outputs = list(outputs)

    # This function implements similar functionality as graph.clone
    # and it should be merged with that
    clone_d = {}
    update_d = {}
    update_expr = []
    # list of shared inputs that are used as inputs of the graph
    shared_inputs = []

    def clone_v_get_shared_updates(v, copy_inputs_over):
        '''
        Clones a variable and its inputs recursively until all are in
        clone_d. Also appends all shared variables met along the way to
        shared inputs, and their default_update (if applicable) to update_d
        and update_expr.

        v can have an fgraph attached to it, in which case we want to clone
        constants (to avoid having a constant belonging to two fgraphs).
        '''
        # this co-recurses with clone_a
        assert v is not None
        if v in clone_d:
            return clone_d[v]
        if v.owner:
            clone_a(v.owner, copy_inputs_over)
            return clone_d.setdefault(v, v)
        elif isinstance(v, SharedVariable):
            if v not in shared_inputs:
                shared_inputs.append(v)
            if hasattr(v, 'default_update'):
                # Check that v should not be excluded from the default
                # updates list
                if (no_default_updates is False or
                    (isinstance(no_default_updates, list) and
                     v not in no_default_updates)):
                    # Do not use default_update if a "real" update was
                    # provided
                    if v not in update_d:
                        v_update = v.type.filter_variable(v.default_update,
                                                          allow_convert=False)
                        if v_update.type != v.type:
                            raise TypeError(
                                'an update must have the same type as '
                                'the original shared variable',
                                (v, v.type, v_update, v_update.type))
                        update_d[v] = v_update
                        update_expr.append((v, v_update))
        if not copy_inputs_over or (isinstance(v, Constant) and
                                    hasattr(v, 'fgraph')):
            # Cloning shared variables implies copying their underlying
#......... part of the code omitted .........
Developer: Censio, Project: Theano, Lines: 101, Source: pfunc.py
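`rebuild_collect_shared` is internal plumbing for `pfunc`; the same subgraph-replacement idea is exposed publicly as `theano.clone`. A small sketch:

import theano
import theano.tensor as tensor
from theano.compile.sharedvalue import shared

x = tensor.dscalar('x')
w = shared(2.0, name='w')
y = w * x + 1

# Rebuild the graph with the shared variable replaced by a plain input.
w_new = tensor.dscalar('w_new')
y2 = theano.clone(y, replace={w: w_new})

f = theano.function([x, w_new], y2)
print(f(3.0, 10.0))   # 31.0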


Example 17: _update_classifier

    def _update_classifier(self, data, labels, w, classes):
        """Update the classifier parameters theta and bias

        Parameters
        ----------

        data : list of 2D arrays, element i has shape=[voxels_i, samples_i]
            Each element in the list contains the fMRI data of one subject for
            the classification task.

        labels : list of arrays of int, element i has shape=[samples_i]
            Each element in the list contains the labels for the data samples
            in data_sup.

        w : list of 2D array, element i has shape=[voxels_i, features]
            The orthogonal transforms (mappings) :math:`W_i` for each subject.

        classes : int
            The number of classes in the classifier.


        Returns
        -------

        theta : array, shape=[features, classes]
            The MLR parameter for the class planes.

        bias : array shape=[classes,]
            The MLR parameter for class biases.
        """

        # Stack the data and labels for training the classifier
        data_stacked, labels_stacked, weights = \
            SSSRM._stack_list(data, labels, w)

        features = w[0].shape[1]
        total_samples = weights.size

        data_th = S.shared(data_stacked.astype(theano.config.floatX))
        val_ = S.shared(labels_stacked)
        total_samples_S = S.shared(total_samples)
        theta_th = T.matrix(name='theta', dtype=theano.config.floatX)
        bias_th = T.col(name='bias', dtype=theano.config.floatX)
        constf2 = S.shared(self.alpha / self.gamma, allow_downcast=True)
        weights_th = S.shared(weights)

        log_p_y_given_x = \
            T.log(T.nnet.softmax((theta_th.T.dot(data_th.T)).T + bias_th.T))
        f = -constf2 * T.sum((log_p_y_given_x[T.arange(total_samples_S), val_])
                             / weights_th) + 0.5 * T.sum(theta_th ** 2)

        manifold = Product((Euclidean(features, classes),
                            Euclidean(classes, 1)))
        problem = Problem(manifold=manifold, cost=f, arg=[theta_th, bias_th],
                          verbosity=0)
        solver = ConjugateGradient(mingradnorm=1e-6)
        solution = solver.solve(problem)
        theta = solution[0]
        bias = solution[1]

        del constf2
        del theta_th
        del bias_th
        del data_th
        del val_
        del solver
        del solution

        return theta, bias
Developer: IntelPNI, Project: brainiak, Lines: 69, Source: sssrm.py


Example 18: _update_w

    def _update_w(self, data_align, data_sup, labels, w, s, theta, bias):
        """

        Parameters
        ----------
        data_align : list of 2D arrays, element i has shape=[voxels_i, n_align]
            Each element in the list contains the fMRI data for alignment of
            one subject. There are n_align samples for each subject.

        data_sup : list of 2D arrays, element i has shape=[voxels_i, samples_i]
            Each element in the list contains the fMRI data of one subject for
            the classification task.

        labels : list of arrays of int, element i has shape=[samples_i]
            Each element in the list contains the labels for the data samples
            in data_sup.

        w : list of array, element i has shape=[voxels_i, features]
            The orthogonal transforms (mappings) :math:`W_i` for each subject.

        s : array, shape=[features, samples]
            The shared response.

        theta : array, shape=[classes, features]
            The MLR class plane parameters.

        bias : array, shape=[classes]
            The MLR class biases.

        Returns
        -------

        w : list of 2D array, element i has shape=[voxels_i, features]
            The updated orthogonal transforms (mappings).
        """
        subjects = len(data_align)

        s_th = S.shared(s.astype(theano.config.floatX))
        theta_th = S.shared(theta.T.astype(theano.config.floatX))
        bias_th = S.shared(bias.T.astype(theano.config.floatX),
                           broadcastable=(True, False))

        for subject in range(subjects):
            logger.info('Subject Wi %d' % subject)
            # Solve for subject i
            # Create the theano function
            w_th = T.matrix(name='W', dtype=theano.config.floatX)
            data_srm_subject = \
                S.shared(data_align[subject].astype(theano.config.floatX))
            constf1 = \
                S.shared((1 - self.alpha) * 0.5 / data_align[subject].shape[1],
                         allow_downcast=True)
            f1 = constf1 * T.sum((data_srm_subject - w_th.dot(s_th))**2)

            if data_sup[subject] is not None:
                lr_samples_S = S.shared(data_sup[subject].shape[1])
                data_sup_subject = \
                    S.shared(data_sup[subject].astype(theano.config.floatX))
                labels_S = S.shared(labels[subject])
                constf2 = S.shared(-self.alpha / self.gamma
                                   / data_sup[subject].shape[1],
                                   allow_downcast=True)

                log_p_y_given_x = T.log(T.nnet.softmax((theta_th.dot(
                    w_th.T.dot(data_sup_subject))).T + bias_th))
                f2 = constf2 * T.sum(
                    log_p_y_given_x[T.arange(lr_samples_S), labels_S])
                f = f1 + f2
            else:
                f = f1

            # Define the problem and solve
            f_subject = self._objective_function_subject(data_align[subject],
                                                         data_sup[subject],
                                                         labels[subject],
                                                         w[subject],
                                                         s, theta, bias)
            minstep = np.min((10**-np.floor(np.log10(f_subject)), 1e-1))
            manifold = Stiefel(w[subject].shape[0], w[subject].shape[1])
            problem = Problem(manifold=manifold, cost=f, arg=w_th, verbosity=0)
            solver = ConjugateGradient(mingradnorm=1e-2, minstepsize=minstep)
            w[subject] = np.array(solver.solve(
                problem, x=w[subject].astype(theano.config.floatX)))
            if data_sup[subject] is not None:
                del f2
                del log_p_y_given_x
                del data_sup_subject
                del labels_S
            del solver
            del problem
            del manifold
            del f
            del f1
            del data_srm_subject
            del w_th
        del theta_th
        del bias_th
        del s_th

        # Run garbage collector to avoid filling up the memory
#......... part of the code omitted .........
Developer: IntelPNI, Project: brainiak, Lines: 101, Source: sssrm.py


Example 19: pfunc

def pfunc(params, outputs=None, mode=None, updates=None, givens=None,
        no_default_updates=False, accept_inplace=False, name=None,
        rebuild_strict=True, allow_input_downcast=None,
        profile=None, on_unused_input=None):
    """Function-constructor for graphs with shared variables.

    :type params: list of either Variable or Param instances.
    :param params: function parameters, these are not allowed to be shared
    variables

    :type outputs: list of Variables or Out instances
    :param outputs: expressions to compute

    :type mode: string or `theano.compile.Mode` instance.
    :param mode: compilation mode

    :type updates: iterable over pairs (shared_variable, new_expression). List, tuple or dict.
    :param updates: update the values for SharedVariable inputs according to these expressions

    :type givens: iterable over pairs (Var1, Var2) of Variables. List, tuple or dict.  The Var1
    and Var2 in each pair must have the same Type.

    :param givens: specific substitutions to make in the computation graph (Var2 replaces
    Var1).

    :type no_default_updates: either bool or list of Variables
    :param no_default_updates: if True, do not perform any automatic update on Variables.
    If False (default), perform them all. Else, perform automatic updates on all Variables
    that are neither in "updates" nor in "no_default_updates".

    :type name: None or string
    :param name: attaches a name to the profiling result of this function.

    :type allow_input_downcast: Boolean
    :param allow_input_downcast: True means that the values passed as
    inputs when calling the function can be silently downcasted to fit
    the dtype of the corresponding Variable, which may lose precision.
    False means that it will only be cast to a more general, or
    precise, type. None (default) is almost like False, but allows
    downcasting of Python float scalars to floatX.

    :type profile: None, True, str, or ProfileStats instance
    :param profile: accumulate profiling information into a given ProfileStats
    instance. None is the default, and means to use the value of
    config.profile.
    If argument is `True` then a new ProfileStats instance will be
    used.  If argument is a string, a new ProfileStats instance will be created
    with that string as its `message` attribute.  This profiling object will be
    available via self.profile.

    :type on_unused_input: str
    :param on_unused_input: What to do if a variable in the 'inputs' list
        is not used in the graph. Possible values are 'raise', 'warn',
        'ignore' and None.


    :rtype: theano.compile.Function
    :returns: a callable object that will compute the outputs (given the inputs)
    and update the implicit function arguments according to the `updates`.


    :note: Regarding givens: Be careful to make sure that these substitutions are
    independent--behaviour when Var1 of one pair appears in the graph leading to Var2 in
    another expression is undefined.  Replacements specified with givens are different from
    optimizations in that Var2 is not expected to be equivalent to Var1.

    """
    #
    # This function works by cloning the graph (except for the inputs), and then shipping it
    # off to compile.function
    # (There it will be cloned again, unnecessarily, because it doesn't know that we already
    # cloned it.)
    #
    # First, it clones the replacements named in the givens argument, and points each Var1 to
    # the clone of Var2.
    # Then it sets the inputs in the clone dictionary.
    # After these steps, we are assuming that the clone dictionary contains all the inputs to
    # the computation graph.
    #
    # Then it clones the outputs and the update expressions.  This rebuilds a computation graph
    # from the inputs and the givens.
    #
    if updates is None:
        updates = []
    if givens is None:
        givens = []
    if profile is None:
        profile = config.profile
        # profile -> True or False
    if profile is True:
        profile = ProfileStats(message=name)
        # profile -> object
    if type(profile) == str:
        profile = ProfileStats(message=profile)
    # profile is typically either False or an object at this point.
    # No need to block other objects being passed through though. It might be
    # useful.

    if not isinstance(params, (list, tuple)):
        raise Exception("in pfunc() the first argument must be a list or a tuple")
#......... part of the code omitted .........
Developer: nicholas-leonard, Project: Theano, Lines: 101, Source: pfunc.py
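A minimal call pattern for `pfunc`, matching the docstring above; `theano.function` builds on this for graphs with shared variables:

import numpy
from theano import tensor
from theano.compile.sharedvalue import shared
from theano.compile.pfunc import pfunc

w = shared(numpy.zeros(3), name='w')
x = tensor.dvector('x')
lr = tensor.dscalar('lr')

cost = tensor.sum((w - x) ** 2)
step = pfunc(params=[x, lr],
             outputs=[cost],
             updates=[(w, w - lr * tensor.grad(cost, w))])

print(step(numpy.ones(3), 0.1))   # cost is computed before w is updated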


Example 20: badfunc

def badfunc():
    shared(7, bad_kw=False)
Developer: 12190143, Project: Theano, Lines: 2, Source: test_shared.py



Note: The theano.compile.sharedvalue.shared examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the code remains with the original authors. Refer to the corresponding project's license before distributing or using the code; do not reproduce without permission.

