Python tensor.stack Function Code Examples


This article collects typical usage examples of the Python function theano.tensor.stack. If you are wondering what stack does, how to call it, or what its usage looks like in practice, the curated code examples below should help.



The 20 code examples of the stack function shown below are sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
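As a quick orientation before the examples: T.stack joins a list of symbolic tensors along a new leading axis (later Theano versions also accept an optional axis argument). The following is a minimal sketch, assuming a standard Theano installation; the variable names are illustrative.

import numpy as np
import theano
import theano.tensor as T

a = T.vector('a')
b = T.vector('b')
stacked = T.stack([a, b])  # stacks along a new leading axis: shape (2, len(a))
f = theano.function([a, b], stacked)
print(f(np.ones(3, dtype=theano.config.floatX),
        np.zeros(3, dtype=theano.config.floatX)))
# [[1. 1. 1.]
#  [0. 0. 0.]]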

Example 1: stack_and_shared

def stack_and_shared(_input):
    """
    This will take a list of input variables, turn them into theano shared variables, and return them stacked
    in a single tensor.

    :param _input: list of input variables
    :type _input: list, object, or None

    :return: symbolic tensor of the input variables stacked, or None
    :rtype: Tensor or None
    """
    if _input is None:
        return None
    elif isinstance(_input, list):
        shared_ins = []
        for _in in _input:
            try:
                shared_ins.append(theano.shared(_in))
            except TypeError as _:
                shared_ins.append(_in)
        return T.stack(shared_ins)
    else:
        try:
            _output = [theano.shared(_input)]
        except TypeError as _:
            _output = [_input]
        return T.stack(_output)
Author: chagge | Project: OpenDeep | Lines: 27 | Source: misc.py


Example 2: _step

        def _step(x_, h_, c_, pred_, prob_):
            h_a = []
            c_a = []
            for it in range(self.n_levels):
                preact = T.dot(h_[it], self.U[it])
                preact += T.dot(x_, self.W[it]) + self.b[it]

                i = T.nnet.sigmoid(_slice(preact, 0, self.n_dim))
                f = T.nnet.sigmoid(_slice(preact, 1, self.n_dim))
                o = T.nnet.sigmoid(_slice(preact, 2, self.n_dim))
                c = T.tanh(_slice(preact, 3, self.n_dim))

                c = f * c_[it] + i * c
                h = o * T.tanh(c)

                h_a.append(h)
                c_a.append(c)

                x_ = h

            q = T.dot(h, self.L) + self.b0
            prob = T.nnet.softmax(q)
            pred = T.argmax(prob, axis=1)

            return T.stack(h_a).squeeze(), T.stack(c_a).squeeze(), pred, prob
Author: velicue | Project: char-rnn-theano | Lines: 25 | Source: lstm.py


Example 3: predict_K

    def predict_K(self, x, z, params):
        # s_mean, s_x for computing mean from s_x
        Ks = []
        Ks_new = []
        offset = 0
        for kern, slice_k in zip(self.kernels, self.slices):
            params_k = params[offset: offset + kern.n_params]
            K_k, K_new_k = kern.predict_K(
                x[:, slice_k], z[:, slice_k], params_k)
            Ks.append(K_k)
            Ks_new.append(K_new_k)
            offset += kern.n_params

        log_weights = TT.concatenate((np.asarray([0]),
                                      params[offset:offset + self.n_my_params]))
        weights = TT.exp(log_weights) / TT.exp(log_weights).sum()

        if len(self.kernels) == 1:
            return Ks[0], Ks_new[0]
        else:
            # XXX: log_K, should be logadd here (#11)
            wK = TT.sum(
                weights[:, None, None] * TT.stack(*Ks), axis=0)
            wK_new = TT.sum(
                weights[:, None, None] * TT.stack(*Ks_new), axis=0)
            return wK, wK_new
Author: gopal-m | Project: hyperopt-gpsmbo | Lines: 26 | Source: kernels.py


Example 4: tangent2ambient

    def tangent2ambient(self, X, Z):
        U = tensor.stack((X.U.dot(Z.M) + Z.Up, X.U), 0).reshape((-1, X.U.shape[1]))
        # U = np.hstack((X.U.dot(Z.M) + Z.Up, X.U))
        S = tensor.eye(2 * self._k)
        V = tensor.stack((X.V, Z.Vp), 1).reshape((X.V.shape[0], -1))
        # V = np.vstack((X.V, Z.Vp))
        return ManifoldElementShared.from_vars((U, S, V), shape=(self._m, self._n), r=self._k)
Author: Nehoroshiy | Project: theano_manifold | Lines: 7 | Source: fixed_rank.py


Example 5: stack_and_shared

def stack_and_shared(input):
    """
    This will take a list of input variables, turn them into theano shared variables, and return them stacked
    in a single tensor.

    Parameters
    ----------
    input : list or object
        List of input variables to stack into a single shared tensor.

    Returns
    -------
    tensor
        Symbolic tensor of the input variables stacked, or None if input was None.
    """
    if input is None:
        return None
    elif isinstance(input, list):
        shared_ins = []
        for _in in input:
            try:
                shared_ins.append(theano.shared(_in))
            except TypeError as _:
                shared_ins.append(_in)
        return T.stack(shared_ins)
    else:
        try:
            _output = [theano.shared(input)]
        except TypeError as _:
            _output = [input]
        return T.stack(_output)
Author: JediKoder | Project: OpenDeep | Lines: 31 | Source: misc.py


Example 6: finetune_cost_updates

    def finetune_cost_updates(self, center, mu, learning_rate):
        """ This function computes the cost and the updates ."""

        # note : we sum over the size of a datapoint; if we are using
        #        minibatches, L will be a vector, with one entry per
        #        example in minibatch
        network_output = self.get_output()
        temp = T.pow(center - network_output, 2)    
        
        L =  T.sum(temp, axis=1) 
        # Add the network reconstruction error 
        z = self.get_network_reconst()
        reconst_err = T.sum(T.pow(self.x - z, 2), axis = 1)            
        L = self.beta*L + self.lbd*reconst_err
        
        cost1 = T.mean(L)
        cost2 = self.lbd*T.mean(reconst_err)  
        cost3 = cost1 - cost2

        # compute the gradients of the cost of the `dA` with respect
        # to its parameters
        gparams = T.grad(cost1, self.params)  
        # generate the list of updates
        updates = []
        grad_values = []
        param_norm = []
        for param, delta, gparam in zip(self.params, self.delta, gparams):
            updates.append( (delta, mu*delta - learning_rate * gparam) )
            updates.append( (param, param + mu*mu*delta - (1+mu)*learning_rate*gparam ))
            grad_values.append(gparam.norm(L=2))
            param_norm.append(param.norm(L=2))
        
        grad_ = T.stack(*grad_values)
        param_ = T.stack(*param_norm)
        return ((cost1, cost2, cost3, grad_, param_), updates)
Author: WenjunJiang | Project: DCN | Lines: 35 | Source: multi_layer_km.py


Example 7: func

def func(chol_vec, delta):
    chol = tt.stack([
        tt.stack([tt.exp(0.1 * chol_vec[0]), 0]),
        tt.stack([chol_vec[1], 2 * tt.exp(chol_vec[2])]),
    ])
    cov = tt.dot(chol, chol.T)
    return MvNormalLogp()(cov, delta)
Author: aloctavodia | Project: pymc3 | Lines: 7 | Source: test_dist_math.py


Example 8: generate

    def generate(self, h_, c_, x_):
        h_a = []
        c_a = []
        for it in range(self.n_levels):
            preact = T.dot(x_, self.W[it])
            preact += T.dot(h_[it], self.U[it]) + self.b[it]

            i = T.nnet.sigmoid(self.slice(preact, 0, self.n_dim))
            f = T.nnet.sigmoid(self.slice(preact, 1, self.n_dim))
            o = T.nnet.sigmoid(self.slice(preact, 2, self.n_dim))
            c = T.tanh(self.slice(preact, 3, self.n_dim))

            c = f * c_[it] + i * c
            h = o * T.tanh(c)

            h_a.append(h)
            c_a.append(c)

            x_ = h

        q = T.dot(h, self.L) + self.b0
        # mask = T.concatenate([T.alloc(np_floatX(1.), q.shape[0] - 1), T.alloc(np_floatX(0.), 1)])
        prob = T.nnet.softmax(q / 1)

        return prob, T.stack(h_a).squeeze(), T.stack(c_a)[0].squeeze()
Author: velicue | Project: char-rnn-theano | Lines: 25 | Source: lstm.py


Example 9: _setOutputs

	def _setOutputs(self) :
		inps = []
		for l in self.network.inConnections[self] :
			inps.append(l.outputs)

		self.outputs = tt.stack(inps).reshape((-1, self.nbChannels, self.height, self.width))
		self.testOutputs = tt.stack(inps).reshape((-1, self.nbChannels, self.height, self.width))
Author: BenJamesbabala | Project: Mariana | Lines: 7 | Source: convolution.py


Example 10: retr

    def retr(self, X, Z, t=None):
        if t is None:
            t = 1.0
        Qu, Ru = tensor.nlinalg.qr(Z.Up)

        # we need rq decomposition here
        Qv, Rv = tensor.nlinalg.qr(Z.Vp[::-1].T)
        Rv = Rv.T[::-1]
        Rv = Rv[:, ::-1]
        Qv = Qv.T[::-1]

        # now we have rq decomposition (Rv @ Qv = Z.Vp)
        #Rv, Qv = rq(Z.Vp, mode='economic')


        zero_block = tensor.zeros((Ru.shape[0], Rv.shape[1]))
        block_mat = tensor.stack(
            (
                tensor.stack((X.S + t * Z.M, t * Rv), 1).reshape((Rv.shape[0], -1)),
                tensor.stack((t * Ru, zero_block), 1).reshape((Ru.shape[0], -1))
            )
        ).reshape((-1, Ru.shape[1] + Rv.shape[1]))

        Ut, St, Vt = tensor.nlinalg.svd(block_mat, full_matrices=False)

        U = tensor.stack((X.U, Qu), 1).reshape((Qu.shape[0], -1)).dot(Ut[:, :self._k])
        V = Vt[:self._k, :].dot(tensor.stack((X.V, Qv), 0).reshape((-1, Qv.shape[1])))
        # add some machinery eps to get a slightly perturbed element of a manifold
        # even if we have some zeros in S
        S = tensor.diag(St[:self._k]) + tensor.diag(np.spacing(1) * tensor.ones(self._k))
        return ManifoldElementShared.from_vars((U, S, V), shape=(self._m, self._n), r=self._k)
Author: Nehoroshiy | Project: theano_manifold | Lines: 31 | Source: fixed_rank.py


Example 11: _batch_vectorization

    def _batch_vectorization(self,**args):
        fun_in = args["fun"]
        symbolic_X_list = args["symbolic_X_list"]
        if "symbolic_c_inp_list" in args and "t" in args:
            t = args["t"]
            symbolic_c_inp_list = args["symbolic_c_inp_list"]
            fun = lambda x,y: fun_in(x,y,t)
        elif "symbolic_c_inp_list" in args and "t" not in args:
            symbolic_c_inp_list = args["symbolic_c_inp_list"]
            fun = fun_in
        elif "symbolic_c_inp_list" not in args and "t" in args:
            t = args["t"]
            symbolic_c_inp_list = []
            fun = lambda x,y: fun_in(x,t)

        fun_list = []
        for i in np.arange(self.number_of_rollouts):
            symbolic_X_list_i = [a[i] for a in symbolic_X_list]
            symbolic_c_inp_list_i = [a[i] for a in symbolic_c_inp_list]
            out_list = fun(symbolic_X_list_i, symbolic_c_inp_list_i)
            fun_list.append(out_list)
        if type(fun_list[0]) != list:
            return T.stack(fun_list,axis = 0)
        else:
            ziped_list = [list(a) for a in zip(*fun_list)]
            return [T.stack(a,axis = 0) for a in ziped_list]
Author: DoTha | Project: ParallelPice | Lines: 26 | Source: define_Theano_Control_Problem.py


Example 12: tangent2ambient

    def tangent2ambient(self, X, Z):
        U = tensor.stack((X.U.dot(Z.M) + Z.Up, X.U), 0).reshape((-1, X.U.shape[1]))
        # U = np.hstack((X.U.dot(Z.M) + Z.Up, X.U))
        S = tensor.eye(2 * self._k)
        V = tensor.stack((X.V, Z.Vp), 1).reshape((X.V.shape[0], -1))
        # V = np.vstack((X.V, Z.Vp))
        return (U, S, V)
Author: Nehoroshiy | Project: theano_manifold | Lines: 7 | Source: fixed_rank_splitted.py


Example 13: retr

    def retr(self, X, Z, t=None):
        U, S, V = X
        Up, M, Vp = Z
        if t is None:
            t = 1.0
        Qu, Ru = tensor.nlinalg.qr(Up)

        # we need rq decomposition here
        Qv, Rv = tensor.nlinalg.qr(Vp[::-1].T)
        Rv = Rv.T[::-1]
        Rv = Rv[:, ::-1]
        Qv = Qv.T[::-1]

        # now we have rq decomposition (Rv @ Qv = Z.Vp)
        #Rv, Qv = rq(Z.Vp, mode='economic')


        zero_block = tensor.zeros((Ru.shape[0], Rv.shape[1]))
        block_mat = tensor.stack(
            (
                tensor.stack((S + t * M, t * Rv), 1).reshape((Rv.shape[0], -1)),
                tensor.stack((t * Ru, zero_block), 1).reshape((Ru.shape[0], -1))
            )
        ).reshape((-1, Ru.shape[1] + Rv.shape[1]))

        Ut, St, Vt = tensor.nlinalg.svd(block_mat, full_matrices=False)

        U_res = tensor.stack((U, Qu), 1).reshape((Qu.shape[0], -1)).dot(Ut[:, :self._k])
        V_res = Vt[:self._k, :].dot(tensor.stack((V, Qv), 0).reshape((-1, Qv.shape[1])))
        # add some machinery eps to get a slightly perturbed element of a manifold
        # even if we have some zeros in S
        S_res = tensor.diag(St[:self._k]) + tensor.diag(np.spacing(1) * tensor.ones(self._k))
        return (U_res, S_res, V_res)
Author: Nehoroshiy | Project: theano_manifold | Lines: 33 | Source: fixed_rank_splitted.py


Example 14: _for_step

    def _for_step(self,
                  xi_t, xf_t, xo_t, xc_t, mask_t,
                  h_tm1, c_tm1,
                  context, context_mask, context_att_trans,
                  hist_h, hist_h_att_trans,
                  b_u):

        # context: (batch_size, context_size, context_dim)

        # (batch_size, att_layer1_dim)
        h_tm1_att_trans = T.dot(h_tm1, self.att_h_W1)

        # (batch_size, context_size, att_layer1_dim)
        att_hidden = T.tanh(context_att_trans + h_tm1_att_trans[:, None, :])

        # (batch_size, context_size, 1)
        att_raw = T.dot(att_hidden, self.att_W2) + self.att_b2

        # (batch_size, context_size)
        ctx_att = T.exp(att_raw).reshape((att_raw.shape[0], att_raw.shape[1]))

        if context_mask:
            ctx_att = ctx_att * context_mask

        ctx_att = ctx_att / T.sum(ctx_att, axis=-1, keepdims=True)

        # (batch_size, context_dim)
        ctx_vec = T.sum(context * ctx_att[:, :, None], axis=1)

        ##### attention over history #####

        if hist_h:
            hist_h = T.stack(hist_h).dimshuffle((1, 0, 2))
            hist_h_att_trans = T.stack(hist_h_att_trans).dimshuffle((1, 0, 2))
            h_tm1_hatt_trans = T.dot(h_tm1, self.hatt_h_W1)

            hatt_hidden = T.tanh(hist_h_att_trans + h_tm1_hatt_trans[:, None, :])
            hatt_raw = T.dot(hatt_hidden, self.hatt_W2) + self.hatt_b2
            hatt_raw = hatt_raw.flatten(2)
            h_att_weights = T.nnet.softmax(hatt_raw)

            # (batch_size, output_dim)
            h_ctx_vec = T.sum(hist_h * h_att_weights[:, :, None], axis=1)
        else:
            h_ctx_vec = T.zeros_like(h_tm1)

        ##### attention over history #####

        i_t = self.inner_activation(xi_t + T.dot(h_tm1 * b_u[0], self.U_i) + T.dot(ctx_vec, self.C_i) + T.dot(h_ctx_vec, self.H_i))
        f_t = self.inner_activation(xf_t + T.dot(h_tm1 * b_u[1], self.U_f) + T.dot(ctx_vec, self.C_f) + T.dot(h_ctx_vec, self.H_f))
        c_t = f_t * c_tm1 + i_t * self.activation(xc_t + T.dot(h_tm1 * b_u[2], self.U_c) + T.dot(ctx_vec, self.C_c) + T.dot(h_ctx_vec, self.H_c))
        o_t = self.inner_activation(xo_t + T.dot(h_tm1 * b_u[3], self.U_o) + T.dot(ctx_vec, self.C_o) + T.dot(h_ctx_vec, self.H_o))
        h_t = o_t * self.activation(c_t)

        h_t = (1 - mask_t) * h_tm1 + mask_t * h_t
        c_t = (1 - mask_t) * c_tm1 + mask_t * c_t

        # ctx_vec = theano.printing.Print('ctx_vec')(ctx_vec)

        return h_t, c_t, ctx_vec
Author: chubbymaggie | Project: NL2code | Lines: 60 | Source: components.py


Example 15: forward_prop_step_stack

        def forward_prop_step_stack(x_t, masks, h_prevs, c_prevs, stack_prevs, ptrs_to_top_prevs):
            # determine, for all layers, if this input was a push/pop
            is_push, is_pop = map_push_pop(x_t, self.PUSH, self.POP)
            is_null = get_is_null(x_t, self.NULL)

            nonsymbolic_hs = []
            nonsymbolic_cs = []
            nonsymbolic_stacks = []
            nonsymbolic_ptrs_to_tops = []

            h = x_t
            for i,layer in enumerate(self.layers):
                h, c, stack, ptrs_to_top = layer.forward_prop_stack(h, h_prevs[i,:,:], c_prevs[i,:,:], stack_prevs[i,:,:,:], ptrs_to_top_prevs[i,:,:,:], is_push, is_pop, is_null)
                h = h*masks[:,:,i] / self.dropout # inverted dropout for scaling

                nonsymbolic_hs.append(h)
                nonsymbolic_cs.append(c)
                nonsymbolic_stacks.append(stack)
                nonsymbolic_ptrs_to_tops.append(ptrs_to_top)
            
            h_s = T.stack(nonsymbolic_hs)
            c_s = T.stack(nonsymbolic_cs)
            stack_s = T.stack(nonsymbolic_stacks)
            ptrs_to_top_s = T.stack(nonsymbolic_ptrs_to_tops)

            o_t = self.W_hy.dot(h)
            
            return o_t, h_s, c_s, stack_s, ptrs_to_top_s
Author: the1mane1event | Project: vprnn | Lines: 28 | Source: NWLSTM_Net.py


Example 16: local_gpu_sum

def local_gpu_sum(node):
    if isinstance(node.op, tensor.elemwise.CAReduce):
        if node.op.scalar_op == scal.add:
            x, = node.inputs
            if x.owner and x.owner.op == host_from_gpu:
                if node.op.axis is None:
                    reduce_mask = [1] * x.type.ndim
                else:
                    reduce_mask = [0] * x.type.ndim
                    for a in node.op.axis:
                        assert reduce_mask[a] == 0
                        reduce_mask[a] = 1
                gsum = GpuSum(reduce_mask)
                pattern = "".join(str(i) for i in reduce_mask)
                if hasattr(gsum, "c_code_reduce_%s" % pattern):
                    rval = host_from_gpu(gsum(gpu_from_host(x)))
                    if rval.type == node.outputs[0].type:
                        return [rval]
                    else:
                        print >>sys.stderr, "WARNING: local_gpu_sum got type wrong"
                        return None
                else:

                    # Try to make a simpler pattern based on reshaping
                    # The principle is that if two adjacent dimensions have the same value in
                    # the reduce_mask, then we can reshape to make them a single dimension, do
                    # the sum, and then reshape to get them back.

                    shape_of = node.env.shape_feature.shape_of

                    x_shape = shape_of[x]

                    new_in_shp = [x_shape[0]]
                    new_mask = [reduce_mask[0]]
                    for i in range(1, x.type.ndim):
                        if reduce_mask[i] == reduce_mask[i - 1]:
                            new_in_shp[-1] *= x_shape[i]
                        else:
                            new_mask.append(reduce_mask[i])
                            new_in_shp.append(x_shape[i])

                    pattern = "".join(str(i) for i in new_mask)
                    new_gsum = GpuSum(new_mask)
                    if hasattr(new_gsum, "c_code_reduce_%s" % pattern):
                        reshaped_x = x.reshape(tensor.stack(*new_in_shp))
                        sum_reshaped_x = host_from_gpu(new_gsum(gpu_from_host(reshaped_x)))

                        if sum_reshaped_x.ndim != node.outputs[0].ndim:
                            unreshaped_sum = sum_reshaped_x.reshape(tensor.stack(*shape_of[node.outputs[0]]))
                        else:
                            unreshaped_sum = sum_reshaped_x
                        if unreshaped_sum.type == node.outputs[0].type:
                            return [unreshaped_sum]
                        else:
                            print >>sys.stderr, "WARNING: local_gpu_sum got type wrong"
                            return None

                        raise Exception("GpuSum don't have implemented the pattern", pattern)
    return False
Author: olivierverdier | Project: Theano | Lines: 59 | Source: opt.py


Example 17: new_attn_step

    def new_attn_step(self, c_t, g_tm1, m_im1, q):
        cWq = T.stack([T.dot(T.dot(c_t, self.Wb), q)])
        cWm = T.stack([T.dot(T.dot(c_t, self.Wb), m_im1)])
        z = T.concatenate([c_t, m_im1, q, c_t * q, c_t * m_im1,
                           T.abs_(c_t - q), T.abs_(c_t - m_im1), cWq, cWm], axis=0)
        l_1 = T.dot(self.W1, z) + self.b1
        l_1 = T.tanh(l_1)
        l_2 = T.dot(self.W2, l_1) + self.b2
        return l_2[0]
Author: 19rick96 | Project: DMN | Lines: 8 | Source: DMN_orig.py


Example 18: max_pool

def max_pool(images, imgshp, maxpoolshp):
    """Implements a max pooling layer

    Takes as input a 2D tensor of shape batch_size x img_size and
    performs max pooling.  Max pooling downsamples by taking the max
    value in a given area, here defined by maxpoolshp. Outputs a 2D
    tensor of shape batch_size x output_size.

    :param images: 2D tensor containing images on which to apply convolution.
                   Assumed to be of shape batch_size x img_size
    :param imgshp: tuple containing image dimensions
    :param maxpoolshp: tuple containing shape of area to max pool over

    :return: out1, symbolic result (2D tensor)
    :return: out2, logical shape of the output
    """
    N = numpy
    poolsize = N.int64(N.prod(maxpoolshp))

    # imgshp contains either 2 entries (height,width) or 3 (nfeatures,h,w)
    # in the first case, default nfeatures to 1
    if N.size(imgshp) == 2:
        imgshp = (1,) + imgshp

    # construct indices and index pointers for sparse matrix, which,
    # when multiplied with input images will generate a stack of image
    # patches
    indices, indptr, spmat_shape, sptype, outshp = \
            convolution_indices.conv_eval(imgshp, maxpoolshp,
                                          maxpoolshp, mode='valid')

#    print 'XXXXXXXXXXXXXXXX MAX POOLING LAYER XXXXXXXXXXXXXXXXXXXX'
#    print 'imgshp = ', imgshp
#    print 'maxpoolshp = ', maxpoolshp
#    print 'outshp = ', outshp

    # build sparse matrix, then generate stack of image patches
    csc = theano.sparse.CSM(sptype)(N.ones(indices.size), indices,
                                    indptr, spmat_shape)
    patches = sparse.structured_dot(csc, images.T).T

    pshape = tensor.stack([images.shape[0] *\
                               tensor.as_tensor(N.prod(outshp)),
                           tensor.as_tensor(imgshp[0]),
                           tensor.as_tensor(poolsize)])
    patch_stack = tensor.reshape(patches, pshape, ndim=3)

    out1 = tensor.max(patch_stack, axis=2)

    pshape = tensor.stack([images.shape[0],
                           tensor.as_tensor(N.prod(outshp)),
                           tensor.as_tensor(imgshp[0])])
    out2 = tensor.reshape(out1, pshape, ndim=3)

    out3 = tensor.DimShuffle(out2.broadcastable, (0, 2, 1))(out2)

    return tensor.flatten(out3, 2), outshp
Author: 12190143 | Project: Theano | Lines: 57 | Source: sp.py


Example 19: new_attention_step

    def new_attention_step(self, ct, prev_g, mem, q_q):
        cWq = T.stack([T.dot(T.dot(ct, self.W_b), q_q)])
        cWm = T.stack([T.dot(T.dot(ct, self.W_b), mem)])
        z = T.concatenate([ct, mem, q_q, ct * q_q, ct * mem, (ct - q_q) ** 2, (ct - mem) ** 2, cWq, cWm])
        l_1 = T.dot(self.W_1, z) + self.b_1
        l_1 = T.tanh(l_1)
        l_2 = T.dot(self.W_2, l_1) + self.b_2
        G = T.nnet.sigmoid(l_2)[0]
        return G
Author: BinbinBian | Project: DMN_MCTest | Lines: 9 | Source: dmn_spv.py


Example 20: _grad_single

    def _grad_single(self, ct, s, lnC2, GAMMI2):
        lnC = lnC2
        GAMMI = GAMMI2
        v = self.v  # T.as_tensor(self.v)[:, ct:]
        v0 = T.as_tensor(v[v[:,0]==0, :])
        v1 = T.as_tensor(v[v[:,0]==1, :])

        cnp = v.shape[0]

        # Gradient of fE wrt the priors over final state
        [ofE, oxS], upd_fE_single = th.scan(fn=self._free_energy,
                                   sequences=v,
                                   non_sequences=[s,self.h,lnC,self.b])
        ofE0 = ofE[v0].sum()
        ofE1 = ofE[v1].sum()

        dFE0dlnC = T.jacobian(ofE0, lnC)
        dFE1dlnC = T.jacobian(ofE1, lnC)
        dFEdlnC  = T.jacobian(ofE,  lnC)
        ofE_ = T.vector()
        ofE_.tag.test_value = ofE.tag.test_value

        # Gradient of Gamma with respect to its initial condition:
        GAMMA, upd_GAMMA = th.scan(fn=self._upd_gamma,
               outputs_info=[GAMMI],
               non_sequences=[ofE, self.lambd, self.alpha, self.beta, cnp],
               n_steps=4)
        dGdg = T.grad(GAMMA[-1], GAMMI)

        dGdfE = T.jacobian(GAMMA[-1], ofE)
        dGdlnC = dGdfE.dot(dFEdlnC)

        out1 = ofE0
        out2 = ofE1
        maxout = T.max([out1, out2])

        exp_out1 = T.exp(GAMMA[-1]*(out1 - maxout))
        exp_out2 = T.exp(GAMMA[-1]*(out2 - maxout))
        norm_const = exp_out1 + exp_out2

        # Derivative wrt the second output (gammi):
        Jac1_gammi = (-(out1-out2)*dGdg*
                T.exp(GAMMA[-1]*(out1+out2 - 2*maxout))/(norm_const**2))
        Jac2_gammi = -Jac1_gammi
#        dfd1_tZ = Jac1_gammi*dCdf[1][0]+ Jac2_gammi*dCdf[1][1]

        # Derivative wrt first input (lnc)
        Jac1_lnC = (T.exp(GAMMA[-1]*(out1 + out2 - 2*maxout))/(norm_const**2)*
                  (-dGdlnC*(out1 - out2) - GAMMA[-1]*(dFE0dlnC - dFE1dlnC)))
        Jac2_lnC = -Jac1_lnC

        Jac1 = T.concatenate([T.stack(Jac1_gammi), Jac1_lnC])
        Jac2 = T.concatenate([T.stack(Jac2_gammi), Jac2_lnC])
        self.debug = [Jac1_lnC, Jac2_lnC, Jac2_gammi, Jac1_gammi, dFE0dlnC,
                      dFE1dlnC, dGdg, out1, out2, v0, v1, v, ct]
        return Jac1, Jac2
Author: dcuevasr | Project: actinf | Lines: 56 | Source: actinfThClass.py



Note: The theano.tensor.stack function examples in this article were compiled by 纯净天空 from source code and documentation on GitHub/MSDocs and similar platforms. The code snippets were selected from open-source projects contributed by many developers, and copyright in the source code remains with the original authors. For distribution and use, please refer to the corresponding project's license; do not reproduce without permission.

