Python tensor.exp Function Code Examples


This article collects and summarizes typical usage examples of the theano.tensor.exp function in Python. If you have been wondering exactly how exp is used, or what it looks like in real code, the hand-picked examples below should help.



The following shows 20 code examples of the exp function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
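
Before turning to the examples, here is a minimal standalone sketch of tensor.exp itself (the variable names are mine): build a symbolic expression, compile it with theano.function, and evaluate it element-wise.

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')            # symbolic input matrix
y = T.exp(x)                 # element-wise exponential
f = theano.function([x], y)  # compile the graph into a callable

a = np.array([[0., 1.], [-1., 2.]], dtype=theano.config.floatX)
print(f(a))  # exp of each entry: [[1, e], [1/e, e**2]]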

Example 1: K

 def K(self, x, y):
     l = tensor.exp(self.log_lenscale)
     d = ((x ** 2).sum(axis=1).dimshuffle(0, 'x')
             + (y ** 2).sum(axis=1)
             - 2 * tensor.dot(x, y.T))
     K = tensor.exp(-tensor.sqrt(d) / l)
     return K
Developer: cyip, Project: hyperopt, Lines of code: 7, Source: theano_gp.py
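
Example 1 forms all pairwise Euclidean distances at once through the identity ||x - y||**2 = ||x||**2 + ||y||**2 - 2*x.y, then exponentiates. A minimal standalone sketch of the same trick, with a hypothetical fixed length-scale standing in for exp(self.log_lenscale):

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')  # (n, d)
y = T.matrix('y')  # (m, d)
# pairwise squared distances, broadcast into an (n, m) matrix
d2 = ((x ** 2).sum(axis=1).dimshuffle(0, 'x')
      + (y ** 2).sum(axis=1)
      - 2 * T.dot(x, y.T))
lenscale = 0.5  # hypothetical constant; the original learns log_lenscale
# clamp tiny negatives caused by floating-point round-off before the sqrt
K = T.exp(-T.sqrt(T.maximum(d2, 0)) / lenscale)
kernel = theano.function([x, y], K)

a = np.random.randn(4, 3).astype(theano.config.floatX)
print(kernel(a, a).shape)  # (4, 4); the diagonal is exp(0) = 1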


Example 2: __init__

 def __init__(self, alpha, beta, *args, **kwargs):
     super(Weibull, self).__init__(*args, **kwargs)
     self.alpha = alpha
     self.beta = beta
     self.mean = beta * T.exp(gammaln(1 + 1./alpha))
     # fixed: the Weibull median is beta * (ln 2)**(1/alpha); gammaln does not belong here
     self.median = beta * T.log(2)**(1./alpha)
     # fixed: variance is beta**2 * Gamma(1 + 2/alpha) - mean**2 (the subtraction
     # goes outside gammaln)
     self.variance = (beta**2) * T.exp(gammaln(1 + 2./alpha)) - self.mean**2
Developer: gurganious, Project: pymc3, Lines of code: 7, Source: continuous.py
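
For reference, the closed-form Weibull moments that the corrected lines implement are mean = beta*Gamma(1 + 1/alpha), median = beta*(ln 2)**(1/alpha), and variance = beta**2 * Gamma(1 + 2/alpha) - mean**2. A quick stdlib-only check with hypothetical parameters alpha = 2, beta = 1 (the Rayleigh case):

import math

alpha, beta = 2.0, 1.0
mean = beta * math.exp(math.lgamma(1 + 1. / alpha))                       # ~0.8862
median = beta * math.log(2) ** (1. / alpha)                               # ~0.8326
variance = beta ** 2 * math.exp(math.lgamma(1 + 2. / alpha)) - mean ** 2  # ~0.2146
print(mean, median, variance)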


Example 3: learn_step

	def learn_step(self):
		
		#this is a list of gradients w.r.t. every parameter in self.params
		gparams=T.grad(self.loss, self.params)
		
		updates=OrderedDict()
		#updates the momentums and parameter values
		i=0
		for param, gparam, momentum, lrate, momentum_coeff in zip(self.params, gparams, self.momentums, self.lrates, self.momentum_coeffs):
			
			#if param.ndim==2:
			#	gparam=T.dot(T.dot(param,param.T),gparam)
			
			if param.name=='log_stddev':
				gparam=gparam*2.0*T.exp(2.0*param)
			
			if param.name=='M':
				gparam=gparam*T.exp(1.0*self.params[i+2]).dimshuffle('x',0)
			
			if param.name=='b':
				gparam=gparam*T.exp(1.0*self.params[i+1])
			
			new_momentum=momentum_coeff*momentum - lrate*gparam*self.global_lrate
			new_param=param + new_momentum
			
			updates[param]=new_param
			updates[momentum]=new_momentum
			i+=1
		
		updates[self.global_lrate]=self.global_lrate*self.lrate_decay
		
		return updates
Developer: float650, Project: sensorimotor, Lines of code: 32, Source: learning_algs.py


Example 4: output_probabilistic

    def output_probabilistic(self, m_w_previous, v_w_previous):
        if (self.non_linear):
            m_in = self.m_w - m_w_previous
            v_in = self.v_w
            # We compute the mean and variance after the ReLU activation
            lam = self.lam
            v_1 = 1 + 2*lam*v_in
            v_1_inv = v_1**-1

            s_1 = T.prod(v_1,axis=1)**-0.5
            v_2 = 1 + 4*lam*v_in
            v_2_inv = v_2**-1
            s_2 = T.prod(v_2,axis=1)**-0.5
            v_inv = v_in**-1
            exponent1 = m_in**2*(1 - v_1_inv)*v_inv
            exponent1 = T.sum(exponent1,axis=1)
            exponent2 = m_in**2*(1 - v_2_inv)*v_inv
            exponent2 = T.sum(exponent2,axis=1)
            m_a = s_1*T.exp(-0.5*exponent1)
            v_a = s_2*T.exp(-0.5*exponent2) - m_a**2

            return (m_a, v_a)

        else:
            m_w_previous_with_bias = \
            T.concatenate([ m_w_previous, T.alloc(1, 1) ], 0)
            v_w_previous_with_bias = \
            T.concatenate([ v_w_previous, T.alloc(0, 1) ], 0)

            m_linear = T.dot(self.m_w, m_w_previous_with_bias) / T.sqrt(self.n_inputs)
            v_linear = (T.dot(self.v_w, v_w_previous_with_bias) + \
                T.dot(self.m_w**2, v_w_previous_with_bias) + \
                T.dot(self.v_w, m_w_previous_with_bias**2)) / self.n_inputs
            return (m_linear, v_linear)
Developer: jshe857, Project: thesis-rbfnn, Lines of code: 34, Source: network_layer.py


Example 5: filterbank_matrices

def filterbank_matrices(center_y, center_x, delta, sigma, N, imgshp):
    """Create a Fy and a Fx

    Parameters
    ----------
    center_y : T.vector (shape: batch_size)
    center_x : T.vector (shape: batch_size)
        Y and X center coordinates for the attention window
    delta : T.vector (shape: batch_size)
    sigma : T.vector (shape: batch_size)

    Returns
    -------
        FY, FX
    """
    tol = 1e-4
    img_height, img_width = imgshp
    muX = center_x.dimshuffle([0, 'x']) + delta.dimshuffle([0, 'x'])*(T.arange(N)-N/2-0.5)
    muY = center_y.dimshuffle([0, 'x']) + delta.dimshuffle([0, 'x'])*(T.arange(N)-N/2-0.5)

    a = T.arange(img_width)
    b = T.arange(img_height)

    FX = T.exp( -(a-muX.dimshuffle([0,1,'x']))**2 / 2. / sigma.dimshuffle([0,'x','x'])**2 )
    FY = T.exp( -(b-muY.dimshuffle([0,1,'x']))**2 / 2. / sigma.dimshuffle([0,'x','x'])**2 )
    FX = FX / (FX.sum(axis=-1).dimshuffle(0, 1, 'x') + tol)
    FY = FY / (FY.sum(axis=-1).dimshuffle(0, 1, 'x') + tol)

    return FY, FX
Developer: Xi-Liang, Project: lasagne-draw, Lines of code: 29, Source: draw_helpers.py
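
A minimal shape check for filterbank_matrices, assuming the function above is in scope. FY comes out as (batch_size, N, img_height) and FX as (batch_size, N, img_width):

import numpy as np
import theano
import theano.tensor as T

cy, cx = T.vector('cy'), T.vector('cx')
delta, sigma = T.vector('delta'), T.vector('sigma')
FY, FX = filterbank_matrices(cy, cx, delta, sigma, N=3, imgshp=(8, 8))
f = theano.function([cy, cx, delta, sigma], [FY, FX])

z = np.zeros(2, dtype=theano.config.floatX)  # batch of 2, windows centered at 0
fy, fx = f(z, z, z + 1., z + 1.)             # delta = sigma = 1
print(fy.shape, fx.shape)                    # (2, 3, 8) (2, 3, 8)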


Example 6: get_gradients

    def get_gradients(self, X, Y, weights=1.0):
        W_mean, W_ls, b_mean, b_ls = self.parameters

        mean, log_sigma = self.sample_expected(Y)
        sigma = tensor.exp(log_sigma)

        cost = -log_sigma - 0.5 * (X - mean) ** 2 / tensor.exp(2 * log_sigma)
        if weights != 1.0:
            cost = -weights.dimshuffle(0, "x") * cost

        cost_scaled = sigma ** 2 * cost
        cost_gscale = (sigma ** 2).sum(axis=1).dimshuffle([0, "x"])
        cost_gscale = cost_gscale * cost

        gradients = OrderedDict()

        params = Selector(self.mlp).get_parameters()
        for pname, param in params.iteritems():
            gradients[param] = tensor.grad(cost_gscale.sum(), param, consider_constant=[X, Y])

        gradients[W_mean] = tensor.grad(cost_scaled.sum(), W_mean, consider_constant=[X, Y])
        gradients[b_mean] = tensor.grad(cost_scaled.sum(), b_mean, consider_constant=[X, Y])

        gradients[W_ls] = tensor.grad(cost_scaled.sum(), W_ls, consider_constant=[X, Y])
        gradients[b_ls] = tensor.grad(cost_scaled.sum(), b_ls, consider_constant=[X, Y])

        return gradients
Developer: jbornschein, Project: bihm, Lines of code: 27, Source: prob_layers.py


Example 7: step

    def step(xinp_h1_t, xgate_h1_t,
             xinp_h2_t, xgate_h2_t,
             h1_tm1, h2_tm1, k_tm1, w_tm1, ctx):
        attinp_h1, attgate_h1 = att_to_h1.proj(w_tm1)

        h1_t = cell1.step(xinp_h1_t + attinp_h1, xgate_h1_t + attgate_h1,
                          h1_tm1)
        h1inp_h2, h1gate_h2 = h1_to_h2.proj(h1_t)

        a_t = h1_t.dot(h1_to_att_a)
        b_t = h1_t.dot(h1_to_att_b)
        k_t = h1_t.dot(h1_to_att_k)

        a_t = tensor.exp(a_t)
        b_t = tensor.exp(b_t)
        k_t = k_tm1 + tensor.exp(k_t)

        ss4 = calc_phi(k_t, a_t, b_t, u)
        ss5 = ss4.dimshuffle(0, 1, 'x')
        ss6 = ss5 * ctx.dimshuffle(1, 0, 2)
        w_t = ss6.sum(axis=1)

        attinp_h2, attgate_h2 = att_to_h2.proj(w_t)
        h2_t = cell2.step(xinp_h2_t + h1inp_h2 + attinp_h2,
                          xgate_h2_t + h1gate_h2 + attgate_h2, h2_tm1)
        return h1_t, h2_t, k_t, w_t
Developer: feynmanliang, Project: crikey, Lines of code: 26, Source: fruitspeecher_multiscale.py


Example 8: bbox_transform_inv

def bbox_transform_inv(boxes, deltas):
    if boxes.shape[0] == 0:
        return T.zeros((0, deltas.shape[1]), dtype=deltas.dtype)

    boxes = boxes.astype(deltas.dtype)

    widths = boxes[:, 2] - boxes[:, 0] + 1.0
    heights = boxes[:, 3] - boxes[:, 1] + 1.0
    ctr_x = boxes[:, 0] + 0.5 * widths
    ctr_y = boxes[:, 1] + 0.5 * heights

    dx = deltas[:, 0::4]
    dy = deltas[:, 1::4]
    dw = deltas[:, 2::4]
    dh = deltas[:, 3::4]

    pred_ctr_x = dx * widths.dimshuffle(0,'x') + ctr_x.dimshuffle(0,'x')
    pred_ctr_y = dy * heights.dimshuffle(0,'x') + ctr_y.dimshuffle(0,'x')
    pred_w = T.exp(dw) * widths.dimshuffle(0,'x')
    pred_h = T.exp(dh) * heights.dimshuffle(0,'x')

    pred_boxes = T.zeros_like(deltas, dtype=deltas.dtype)
    # x1
    pred_boxes = T.set_subtensor(pred_boxes[:, 0::4], pred_ctr_x - 0.5 * pred_w)
    # y1
    pred_boxes = T.set_subtensor(pred_boxes[:, 1::4], pred_ctr_y - 0.5 * pred_h)
    # x2
    pred_boxes = T.set_subtensor(pred_boxes[:, 2::4], pred_ctr_x + 0.5 * pred_w)
    # y2
    pred_boxes = T.set_subtensor(pred_boxes[:, 3::4], pred_ctr_y + 0.5 * pred_h)

    return pred_boxes
Developer: smajida, Project: faster_r_cnn, Lines of code: 32, Source: bbox.py
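
To make the decoding arithmetic concrete, here is the same transform written out in plain NumPy for a single hypothetical anchor box and delta (the numbers are mine, not from the project):

import numpy as np

box = np.array([10., 10., 49., 29.])           # (x1, y1, x2, y2): width 40, height 20
delta = np.array([0.1, 0.0, np.log(2.), 0.0])  # shift x by 0.1*w, double the width

w, h = box[2] - box[0] + 1., box[3] - box[1] + 1.
cx, cy = box[0] + 0.5 * w, box[1] + 0.5 * h    # center (30, 20)

pred_cx = delta[0] * w + cx    # 34.0
pred_cy = delta[1] * h + cy    # 20.0
pred_w = np.exp(delta[2]) * w  # 80.0: exp maps the log-space size delta back
pred_h = np.exp(delta[3]) * h  # 20.0
print([pred_cx - 0.5 * pred_w, pred_cy - 0.5 * pred_h,
       pred_cx + 0.5 * pred_w, pred_cy + 0.5 * pred_h])  # [-6.0, 10.0, 74.0, 30.0]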


Example 9: softmax_neg

 def softmax_neg(self, X):
     if hasattr(self, 'hack_matrix'):
         X = X * self.hack_matrix
         e_x = T.exp(X - X.max(axis=1).dimshuffle(0, 'x')) * self.hack_matrix
     else:
         e_x = T.fill_diagonal(T.exp(X - X.max(axis=1).dimshuffle(0, 'x')), 0)
     return e_x / e_x.sum(axis=1).dimshuffle(0, 'x')
Developer: marcromeyn, Project: GRU4Rec, Lines of code: 7, Source: gru4rec.py
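
The max-subtraction in softmax_neg is the standard trick for a numerically stable softmax: shifting each row by its maximum leaves the result unchanged, because the shift cancels in the ratio, but keeps T.exp from overflowing. A minimal standalone sketch:

import numpy as np
import theano
import theano.tensor as T

X = T.matrix('X')
e_x = T.exp(X - X.max(axis=1).dimshuffle(0, 'x'))  # largest exponent is exp(0) = 1
softmax = theano.function([X], e_x / e_x.sum(axis=1).dimshuffle(0, 'x'))

big = np.array([[1000., 1001., 1002.]], dtype=theano.config.floatX)
print(softmax(big))  # ~[[0.090 0.245 0.665]]; a naive exp(1000.) would overflow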


Example 10: model

        def model(x, p, p_dropout, noise):
            input_size = x.shape[1]

            h0 = p.W_emb[x]  # (seq_len, batch_size, emb_size)
            h0 = dropout(h0, p_dropout)

            cost, h1, c1, h2, c2 = [0., b1_h, b1_c, b2_h, b2_c]
            eps = srnd.normal((self.hp.seq_size, input_size, self.n_zpt), dtype=theano.config.floatX)
            
            for t in xrange(0, self.hp.seq_size):
                if t >= self.hp.warmup_size:
                    pyx = softmax(T.dot(h2, T.transpose(p.W_emb)))
                    cost += T.sum(T.nnet.categorical_crossentropy(pyx, theano_one_hot(x[t], n_tokens)))

                h_x = concatenate([h0[t], h2], axis=1)
                h1, c1 = lstm(h_x, h1, c1, p.W1, p.V1, p.b1)
                h1 = dropout(h1, p_dropout)

                mu_encoder = T.dot(h1, p.Wmu) + p.bmu
                if noise:
                    log_sigma_encoder = 0.5*(T.dot(h1, p.Wsi) + p.bsi) 
                    cost += -0.5* T.sum(1 + 2*log_sigma_encoder - mu_encoder**2 - T.exp(2*log_sigma_encoder)) * 0.01
                    z = mu_encoder + eps[t]*T.exp(log_sigma_encoder)
                else:
                    z = mu_encoder

                h2, c2 = lstm(z, h2, c2, p.W2, p.V2, p.b2)
                h2 = dropout(h2, p_dropout)

            h_updates = [(b1_h, h1), (b1_c, c1), (b2_h, h2), (b2_c, c2)]
            return cost, h_updates
Developer: Carps, Project: Theano-Lights, Lines of code: 31, Source: lm_draw.py


Example 11: softmax_ratio

def softmax_ratio(numer, denom):
    """
    .. todo::

        WRITEME properly

    Parameters
    ----------
    numer : Variable
        Output of a softmax.
    denom : Variable
        Output of a softmax.

    Returns
    -------
    ratio : Variable
        numer / denom, computed in a numerically stable way
    """

    numer_Z = arg_of_softmax(numer)
    denom_Z = arg_of_softmax(denom)
    numer_Z -= numer_Z.max(axis=1).dimshuffle(0, 'x')
    denom_Z -= denom_Z.min(axis=1).dimshuffle(0, 'x')

    new_num = T.exp(numer_Z - denom_Z) * (T.exp(denom_Z).sum(
        axis=1).dimshuffle(0, 'x'))
    new_den = (T.exp(numer_Z).sum(axis=1).dimshuffle(0, 'x'))

    return new_num / new_den
Developer: dzeno, Project: pylearn2, Lines of code: 29, Source: nnet.py
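
The identity behind softmax_ratio is softmax(n)_i / softmax(d)_i = exp(n_i - d_i) * sum_j(exp(d_j)) / sum_j(exp(n_j)), which avoids dividing one very small probability by another. A quick NumPy check on hypothetical logits:

import numpy as np

def softmax(z):
    e = np.exp(z - z.max())  # stable softmax, as in the examples above
    return e / e.sum()

n = np.array([0.5, -1.0, 2.0])
d = np.array([1.0, 0.0, -0.5])
direct = softmax(n) / softmax(d)
stable = np.exp(n - d) * np.exp(d).sum() / np.exp(n).sum()
print(np.allclose(direct, stable))  # True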


Example 12: initialise

    def initialise(self):
        rng = np.random.RandomState(23455)
        inpt = self.inpt
        w_shp = (self.in_dim, self.out_dim)
        w_bound = np.sqrt(self.out_dim)
        W_mu = theano.shared(np.asarray(
            rng.normal(0., 0.01, size=w_shp),
            dtype=inpt.dtype), name='w_post_mu')

        b_shp = (self.out_dim,)
        b_mu = theano.shared(np.asarray(
            np.zeros(self.out_dim),
            dtype=inpt.dtype), name='b_post_mu')

        W_sigma = theano.shared(np.asarray(
            rng.normal(0., 0.01, size=w_shp),
            dtype=inpt.dtype), name='w_post_sigm')

        b_sigma = theano.shared(np.asarray(
            np.zeros(self.out_dim),
            dtype=inpt.dtype), name='b_post_sigm')

        # find the hidden variable z
        self.mu_encoder = T.dot(self.inpt, W_mu) + b_mu
        self.log_sigma_encoder = 0.5*(T.dot(self.inpt, W_sigma) + b_sigma)
        self.output = self.mu_encoder + T.exp(self.log_sigma_encoder)*self.eps.astype(theano.config.floatX)
        self.prior = 0.5*T.sum(1 + 2*self.log_sigma_encoder - self.mu_encoder**2 - T.exp(2*self.log_sigma_encoder), axis=1).astype(theano.config.floatX)
        self.params = [W_mu, b_mu, W_sigma, b_sigma]
Developer: KyriacosShiarli, Project: SingNet, Lines of code: 25, Source: layers.py
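
Examples 10, 12, and 16 all rely on the same two ingredients of a variational autoencoder: the reparameterized sample z = mu + exp(log_sigma) * eps and the diagonal-Gaussian KL term -0.5 * sum(1 + 2*log_sigma - mu**2 - exp(2*log_sigma)). A minimal standalone sketch (the names are mine):

import numpy as np
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams

srng = RandomStreams(seed=42)
mu = T.matrix('mu')                # encoder means
log_sigma = T.matrix('log_sigma')  # encoder log standard deviations

eps = srng.normal(mu.shape)      # noise drawn outside the gradient path
z = mu + T.exp(log_sigma) * eps  # differentiable w.r.t. mu and log_sigma
kl = -0.5 * T.sum(1 + 2*log_sigma - mu**2 - T.exp(2*log_sigma), axis=1)

f = theano.function([mu, log_sigma], [z, kl])
zeros = np.zeros((2, 3), dtype=theano.config.floatX)
z_val, kl_val = f(zeros, zeros)
print(kl_val)  # [0. 0.]: the KL vanishes when q(z|x) is exactly N(0, I)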


Example 13: cost

    def cost(self, Y, Y_hat):
        """
        Y must be one-hot binary. Y_hat is a softmax estimate
        of Y. Returns the negative log probability of Y under the Y_hat
        distribution.
        """
        y_probclass, y_probcluster = Y_hat
        #Y = self._group_dot.fprop(Y, Y_hat)
        
        CLS = self.array_clusters[T.cast(T.argmax(Y,axis=1),'int32')]
        #theano.printing.Print('value of cls')(CLS)
        assert hasattr(y_probclass, 'owner')
        owner = y_probclass.owner
        assert owner is not None
        op = owner.op
        if isinstance(op, Print):
          assert len(owner.inputs) == 1
          y_probclass, = owner.inputs
          owner = y_probclass.owner
          op = owner.op
        assert isinstance(op, T.nnet.Softmax)

        z_class ,= owner.inputs
        assert z_class.ndim == 2

        assert hasattr(y_probcluster, 'owner')
        owner = y_probcluster.owner
        assert owner is not None
        op = owner.op
        if isinstance(op, Print):
            assert len(owner.inputs) == 1
            y_probcluster, = owner.inputs
            owner = y_probcluster.owner
            op = owner.op
        assert isinstance(op, T.nnet.Softmax)
        z_cluster ,= owner.inputs
        assert z_cluster.ndim == 2

        z_class = z_class - z_class.max(axis=1).dimshuffle(0, 'x')
        log_prob = z_class - T.log(T.exp(z_class).sum(axis=1).dimshuffle(0, 'x'))
        # we use sum and not mean because this is really one variable per row
        # Y = OneHotFormatter(self.n_classes).theano_expr(
        #                         T.addbroadcast(Y,0,1).dimshuffle(0).astype('uint32'))
        log_prob_of = (Y * log_prob).sum(axis=1)
        assert log_prob_of.ndim == 1

        # cluster
        z_cluster = z_cluster - z_cluster.max(axis=1).dimshuffle(0, 'x')
        log_prob_cls = z_cluster - T.log(T.exp(z_cluster).sum(axis=1).dimshuffle(0, 'x'))

        out = OneHotFormatter(self.n_clusters).theano_expr(CLS.astype('int32'))
        #CLS = OneHotFormatter(self.n_clusters).theano_expr(
         #                        T.addbroadcast(CLS, 1).dimshuffle(0).astype('uint32'))
        log_prob_of_cls = (out * log_prob_cls).sum(axis=1)
        assert log_prob_of_cls.ndim == 1

        # p(w|history) = p(c|s) * p(w|c,s)
        log_prob_of = log_prob_of + log_prob_of_cls
        rval = log_prob_of.mean()        
        return - rval
Developer: Sandy4321, Project: lisa_intern, Lines of code: 60, Source: mlp.py


Example 14: entropy_exp

def entropy_exp(X, g=None, b=None, u=None, s=None, a=1., e=1e-8):
    if X.ndim == 4:
        if u is not None and s is not None:
            b_u = u.dimshuffle('x', 0, 'x', 'x')
            b_s = s.dimshuffle('x', 0, 'x', 'x')
        else:
            b_u = T.mean(X, axis=[0, 2, 3]).dimshuffle('x', 0, 'x', 'x')
            b_s = T.mean(T.sqr(X - b_u), axis=[0, 2, 3]).dimshuffle('x', 0, 'x', 'x')
        if a != 1:
            b_u = (1. - a)*0. + a*b_u
            b_s = (1. - a)*1. + a*b_s
        X = (X - b_u) / T.sqrt(b_s + e)
        if g is not None and b is not None:
            X = X*T.exp(g.dimshuffle('x', 0, 'x', 'x'))+b.dimshuffle('x', 0, 'x', 'x')
    elif X.ndim == 2:
        if u is None and s is None:
            u = T.mean(X, axis=0)
            s = T.mean(T.sqr(X - u), axis=0)
        if a != 1:
            u = (1. - a)*0. + a*u
            s = (1. - a)*1. + a*s
        X = (X - u) / T.sqrt(s + e)
        if g is not None and b is not None:
            X = X*T.exp(g)+b
    else:
        raise NotImplementedError
    return X
Developer: taesupkim, Project: dcgan_code, Lines of code: 27, Source: energy_rbm_cifar10_0.py


Example 15: nn2att

 def nn2att(self, l):
     """Convert neural-net outputs to attention parameters
 
     Parameters
     ----------
     l : tensor (batch_size x 5)
 
     Returns
     -------
     center_y : vector (batch_size)
     center_x : vector (batch_size)
     delta : vector (batch_size)
     sigma : vector (batch_size)
     gamma : vector (batch_size)
     """
     center_y  = l[:,0]
     center_x  = l[:,1]
     log_delta = l[:,2]
     log_sigma = l[:,3]
     log_gamma = l[:,4]
 
     delta = T.exp(log_delta)
     sigma = T.exp(log_sigma/2.)
     gamma = T.exp(log_gamma).dimshuffle(0, 'x')
 
     # normalize coordinates
     center_x = (center_x+1.)/2. * self.img_width
     center_y = (center_y+1.)/2. * self.img_height
     delta = (max(self.img_width, self.img_height)-1) / (self.N-1) * delta
 
     return center_y, center_x, delta, sigma, gamma
Developer: ssfg, Project: draw, Lines of code: 31, Source: attention.py


Example 16: createGradientFunctions

    def createGradientFunctions(self):
        #create
        X = T.dmatrices("X")
        mu, logSigma, u, v, f, R = T.dcols("mu", "logSigma", "u", "v", "f", "R")
        mu = sharedX( np.random.normal(10, 10, (self.dimTheta, 1)), name='mu') 
        logSigma = sharedX(np.random.uniform(0, 4, (self.dimTheta, 1)), name='logSigma')
        logLambd = sharedX(np.matrix(np.random.uniform(0, 10)),name='logLambd')
        logLambd = T.patternbroadcast(T.dmatrix("logLambd"),[1,1])
        negKL = 0.5 * T.sum(1 + 2*logSigma - mu ** 2 - T.exp(logSigma) ** 2)
        theta = mu+T.exp(logSigma)*v
        W=theta
        y=X[:,0]
        X_sim=X[:,1:]
        f = (T.dot(X_sim,W)+u).flatten()
        
        gradvariables = [mu, logSigma, logLambd]
        
        
        logLike = T.sum(-(0.5 * np.log(2 * np.pi) + logLambd) - 0.5 * ((y-f)/(T.exp(logLambd)))**2)

        logp = (negKL + logLike)/self.m

        optimizer = -logp
        
        self.negKL = th.function([mu, logSigma], negKL, on_unused_input='ignore')
        self.f = th.function(gradvariables + [X,u,v], f, on_unused_input='ignore')
        self.logLike = th.function(gradvariables + [X, u, v], logLike,on_unused_input='ignore')
        derivatives = T.grad(logp,gradvariables)
        derivatives.append(logp)

        self.gradientfunction = th.function(gradvariables + [X, u, v], derivatives, on_unused_input='ignore')
        self.lowerboundfunction = th.function(gradvariables + [X, u, v], logp, on_unused_input='ignore')

        self.optimizer = BatchGradientDescent(objective=optimizer, params=gradvariables,inputs = [X,u,v],conjugate=True,max_iter=1)
Developer: onenoc, Project: lfvbae, Lines of code: 34, Source: lfvbaeold.py


Example 17: _step

	def _step(self,xg_t, xo_t, xc_t, mask_tm1,h_tm1, c_tm1, u_g, u_o, u_c):

		h_mask_tm1 = mask_tm1 * h_tm1
		c_mask_tm1 = mask_tm1 * c_tm1
		act = T.tensordot( xg_t + h_mask_tm1, u_g , [[1],[2]])
		gate = T.nnet.softmax(act.reshape((-1, act.shape[-1]))).reshape(act.shape)

		c_tilda = self.activation(xc_t + T.dot(h_mask_tm1, u_c))

		sigma_se = self.k_parameters[0]
		sigma_per = self.k_parameters[1]
		sigma_b_lin = self.k_parameters[2]
		sigma_v_lin = self.k_parameters[3]
		sigma_rq = self.k_parameters[4]

		l_se = self.k_parameters[5]
		l_per = self.k_parameters[6]
		l_lin = self.k_parameters[7]
		l_rq = self.k_parameters[8]

		alpha_rq = self.k_parameters[9]
		p_per = self.k_parameters[10]

		k_se = T.pow(sigma_se,2) * T.exp( -T.pow(c_mask_tm1 - c_tilda,2) / (2* T.pow(l_se,2) + self.EPS))
		k_per = T.pow(sigma_per,2) * T.exp( -2*T.pow(T.sin( math.pi*(c_mask_tm1 - c_tilda)/ (p_per + self.EPS) ),2)	 / ( T.pow(l_per,2) + self.EPS ))
		k_lin = T.pow(sigma_b_lin,2) + T.pow(sigma_v_lin,2)	 * (c_mask_tm1 - l_lin) * (c_tilda - l_lin )
		k_rq = T.pow(sigma_rq,2) * T.pow( 1 + T.pow( (c_mask_tm1 - c_tilda),2)	/ ( 2 * alpha_rq * T.pow(l_rq,2) + self.EPS), -alpha_rq)

		ops = [c_mask_tm1,c_tilda,k_se, k_per, k_lin,k_rq]
		yshuff = T.as_tensor_variable( ops, name='yshuff').dimshuffle(1,2,0)
		c_t = (gate.reshape((-1,gate.shape[-1])) * yshuff.reshape((-1,yshuff.shape[-1]))).sum(axis = 1).reshape(gate.shape[:2])
		o_t = self.inner_activation(xo_t + T.dot(h_mask_tm1, u_o))
		h_t = o_t * self.activation(c_t)
		return h_t, c_t
Developer: hongyuanzhu, Project: keras, Lines of code: 34, Source: recurrentpp_soft.py


Example 18: compute_f_mu

def compute_f_mu(x, t, params):
	[centers, spreads, biases, M, b]=params
	diffs=x.dimshuffle(0,1,2,'x')-centers.dimshuffle('x','x',0,1)
	scaled_diffs=(diffs**2)*T.exp(spreads).dimshuffle('x','x',0,1)
	exp_terms=T.sum(scaled_diffs,axis=2)+biases.dimshuffle('x','x',0)*0.0
	h=T.exp(-exp_terms)
	sumact=T.sum(h,axis=2)
	#Normalization
	hnorm=h/sumact.dimshuffle(0,1,'x')
	z=T.dot(hnorm,M)
	z=T.reshape(z,(t.shape[0],t.shape[1],ntgates,nx))+b.dimshuffle('x','x',0,1) #nt by nb by ntgates by nx
	#z=z+T.reshape(x,(t.shape[0],t.shape[1],1,nx))
	
	tpoints=T.cast(T.arange(ntgates),'float32')/T.cast(ntgates-1,'float32')
	tpoints=T.reshape(tpoints, (1,1,ntgates))
	#tgating=T.exp(T.dot(t,muWT)+mubT) #nt by nb by ntgates
	tgating=T.exp(-kT*(tpoints-t)**2)
	tgating=tgating/T.reshape(T.sum(tgating, axis=2),(t.shape[0], t.shape[1], 1))
	tgating=T.reshape(tgating,(t.shape[0],t.shape[1],ntgates,1))
	
	mult=z*tgating
	
	out=T.sum(mult,axis=2)
	
	#out=out+x
	
	return T.cast(out,'float32')
Developer: float650, Project: Diffusion-Model, Lines of code: 27, Source: diffusion_model_learn_betafunc.py


Example 19: flow

def flow(init_W,init_b,nData):
    import theano
    import theano.tensor as T

    n_layers = len(init_b)

    bias = []
    weights = []
    muStates = []
    for layer_i in xrange(n_layers):
        bias.append(theano.shared(value=init_b[layer_i],
                                    name='b'+str(layer_i),
                                    borrow=True))
        weights.append(theano.shared(value=init_W[layer_i],
                                    name='W'+str(layer_i),
                                    borrow=True))
        muStates.append(T.matrix('mu'+str(layer_i)))

    flows = 0.  # fixed: the accumulator was used below without being initialised
    for layer_i in xrange(n_layers):
        diffe = T.tile(bias[layer_i].copy(), (nData,1))
        # All layers except top
        if layer_i < (n_layers-1):
            W_h = weights[layer_i].dot(muStates[layer_i+1].T).T
            diffe += W_h

        if layer_i > 0:
            vT_W = muStates[layer_i-1].dot(weights[layer_i-1])
            diffe += vT_W

        exK = muStates[layer_i]*T.exp(.5*-diffe) + (1.-muStates[layer_i])*T.exp(.5*diffe)
        flows += exK.sum()
    return flows
Developer: rctn, Project: DeepBoltzmannRN, Lines of code: 32, Source: simpledbm.py


Example 20: _step

    def _step(self,h_tm1,p_x,p_xm,ctx):
        #visual attention
    
        #ctx=dropout_layer(ctx)
        v_a=T.exp(ctx+T.dot(h_tm1,self.W_v))
        v_a=v_a/v_a.sum(1, keepdims=True) 
        
        ctx_p=ctx*v_a
    
        #linguistic attention
        l_a=p_x+T.dot(h_tm1,self.W_l)[None,:,:]

        l_a=T.dot(l_a,self.U_att)+self.b_att        

        l_a=T.exp(l_a.reshape((l_a.shape[0],l_a.shape[1])))
        
        l_a=l_a/l_a.sum(0, keepdims=True) 
        
        l_a=l_a*p_xm
        
        p_x_p=(p_x*l_a[:,:,None]).sum(0)
        
        h= T.dot(ctx_p,self.W_vh) + T.dot(p_x_p,self.W_lh)

        return h
Developer: chuckgu, Project: Alphabeta, Lines of code: 25, Source: Modified_Layers.py



Note: The theano.tensor.exp examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Consult each project's license before redistributing or reusing the code; please do not repost without permission.

