Python tensor.inv Function Code Examples


This article collects typical usage examples of the theano.tensor.inv function in Python. If you are unsure what inv does, how to call it, or what working code that uses it looks like, the curated examples below should help. Note that theano.tensor.inv computes the element-wise reciprocal 1/x; it is not a matrix inverse (for that, use theano.tensor.nlinalg.matrix_inverse).



Below are 20 code examples of the inv function, drawn from open-source projects and ordered roughly by popularity.
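Before the examples, here is a minimal self-contained sketch of the element-wise behaviour, using nothing beyond Theano and numpy:

    import numpy as np
    import theano
    import theano.tensor as T

    x = T.matrix('x')
    reciprocal = theano.function([x], T.inv(x))  # element-wise 1/x

    data = np.array([[1.0, 2.0], [4.0, 5.0]], dtype=theano.config.floatX)
    print(reciprocal(data))  # [[1.0, 0.5], [0.25, 0.2]]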

Example 1: loss

    def loss(self, n_samples, regularization_strength, mix, mu, sigma):
        log_sum_loss = -tensor.sum(tensor.log(
                            tensor.sum(mix * tensor.inv(np.sqrt(2 * np.pi) * sigma) *
                                       tensor.exp(tensor.neg(tensor.sqr(mu - self.target_vector)) *
                                                  tensor.inv(2 * tensor.sqr(sigma))), axis=0)
        ))

        # reg_loss = tensor.sum(tensor.sqr(self.layers.values()[0].W))
        # for layer in self.layers.values()[1:]:
        #     reg_loss += tensor.sum(tensor.sqr(layer.W))

        # regularization = 1/n_samples * regularization_strength/2 * reg_loss

        return log_sum_loss #+ regularization
Author: jotterbach, Project: SimpleNeuralNets, Lines: 14, Source: MDN_Log_Sum_Loss.py
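Here tensor.inv appears twice as an element-wise reciprocal: once for the Gaussian normalizer 1/(sqrt(2*pi)*sigma) and once for 1/(2*sigma^2) in the exponent. A minimal standalone sketch of the same log-sum loss, with hypothetical symbolic inputs standing in for the class attributes:

    import numpy as np
    import theano.tensor as T

    mix = T.matrix('mix')        # (n_components, n_samples) mixing weights
    mu = T.matrix('mu')          # (n_components, n_samples) component means
    sigma = T.matrix('sigma')    # (n_components, n_samples) component stds
    target = T.vector('target')  # stands in for self.target_vector

    # per-component Gaussian density, weighted by the mixing coefficients
    density = mix * T.inv(np.sqrt(2 * np.pi) * sigma) * \
              T.exp(-T.sqr(mu - target) * T.inv(2 * T.sqr(sigma)))
    log_sum_loss = -T.sum(T.log(T.sum(density, axis=0)))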


Example 2: get_output_for

    def get_output_for(self, input, deterministic=False,
                       batch_norm_use_averages=None,
                       batch_norm_update_averages=None, **kwargs):

        self.count = self.count + 1
        self.alpha = 5.0 / (10 + self.count)
        # self.alpha = 1.0 / (self.count^2)

        input_mean = input.mean(self.axes)
        input_inv_std = T.inv(T.sqrt(input.var(self.axes) + self.epsilon))

        # Decide whether to use the stored averages or mini-batch statistics
        if batch_norm_use_averages is None:
            batch_norm_use_averages = deterministic
        use_averages = batch_norm_use_averages

        if use_averages:
            mean = self.mean
            inv_std = self.inv_std
        else:
            mean = input_mean
            inv_std = input_inv_std

        # Decide whether to update the stored averages
        if batch_norm_update_averages is None:
            batch_norm_update_averages = not deterministic
        update_averages = batch_norm_update_averages

        if update_averages:
            # Trick: To update the stored statistics, we create memory-aliased
            # clones of the stored statistics:
            running_mean = theano.clone(self.mean, share_inputs=False)
            running_inv_std = theano.clone(self.inv_std, share_inputs=False)
            # set a default update for them:
            running_mean.default_update = ((1 - self.alpha) * running_mean +
                                           self.alpha * input_mean)
            running_inv_std.default_update = ((1 - self.alpha) *
                                              running_inv_std +
                                              self.alpha * input_inv_std)
            # and make sure they end up in the graph without participating in
            # the computation (this way their default_update will be collected
            # and applied, but the computation will be optimized away):
            mean += 0 * running_mean    
            inv_std += 0 * running_inv_std

        # prepare dimshuffle pattern inserting broadcastable axes as needed
        param_axes = iter(range(input.ndim - len(self.axes)))
        pattern = ['x' if input_axis in self.axes
                   else next(param_axes)
                   for input_axis in range(input.ndim)]

        # apply dimshuffle pattern to all parameters
        beta = 0 if self.beta is None else self.beta.dimshuffle(pattern)
        gamma = 1 if self.gamma is None else self.gamma.dimshuffle(pattern)
        mean = mean.dimshuffle(pattern)
        inv_std = inv_std.dimshuffle(pattern)

        # normalize
        normalized = (input - mean) * (gamma * inv_std) + beta
        return normalized
Author: myt00seven, Project: svrg, Lines: 60, Source: my_bn_layer_5_10_m.py
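The only tensor.inv call here is input_inv_std = T.inv(T.sqrt(var + eps)): the inverse standard deviation that batch normalization multiplies by instead of dividing. A small numeric check of just that expression:

    import numpy as np
    import theano
    import theano.tensor as T

    x = T.matrix('x')
    eps = 1e-4
    inv_std = theano.function([x], T.inv(T.sqrt(x.var(axis=0) + eps)))

    data = np.random.randn(100, 5).astype(theano.config.floatX)
    assert np.allclose(inv_std(data), 1.0 / np.sqrt(data.var(axis=0) + eps), atol=1e-5)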


Example 3: normal_log_likelihood_per_component

def normal_log_likelihood_per_component(x, mu, sigma, mixing):
     return (
        MINUS_HALF_LOG_2PI
        - T.log(sigma)
        - 0.5 * T.inv(sigma**2) * (x - mu)**2
        + T.log(mixing)
    )
Author: mmottahedi, Project: neuralnilm_prototype, Lines: 7, Source: objectives.py
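MINUS_HALF_LOG_2PI is not defined in the excerpt; presumably it is -0.5*log(2*pi), which makes the expression the standard Gaussian log-density plus the log mixing weight. A quick check against scipy under that assumption:

    import numpy as np
    import theano
    import theano.tensor as T
    from scipy.stats import norm

    MINUS_HALF_LOG_2PI = -0.5 * np.log(2 * np.pi)  # assumed value of the module constant

    x, mu, sigma = T.dscalars('x', 'mu', 'sigma')
    log_density = MINUS_HALF_LOG_2PI - T.log(sigma) - 0.5 * T.inv(sigma ** 2) * (x - mu) ** 2
    f = theano.function([x, mu, sigma], log_density)

    assert np.isclose(f(1.0, 0.0, 2.0), norm.logpdf(1.0, loc=0.0, scale=2.0))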


Example 4: __init

def __init():
    dataset = T.matrix("dataset", dtype=config.globalFloatType())
    trans_dataset = T.transpose(dataset)
    dot_mul = T.dot(dataset, trans_dataset)
    l2 = T.sqrt(T.sum(T.square(dataset), axis=1))
    
#     p =printing.Print("l2")
#     l2 = p(l2)
    
    l2_inv2 = T.inv(l2).dimshuffle(['x', 0])
#     p =printing.Print("l2_inv2")
#     l2_inv2 = p(l2_inv2)
    
    l2_inv1 = T.transpose(l2_inv2)
#     p =printing.Print("l2_inv1")
#     l2_inv1 = p(l2_inv1)
    
    l2_inv = T.dot(l2_inv1, l2_inv2)
    
#     p =printing.Print("l2_inv")
#     l2_inv = p(l2_inv)
    
    affinty = (T.mul(dot_mul, l2_inv) + 1) / 2
    globals()['__affinty_fun'] = theano.function(
             [dataset],
             [affinty],
             allow_input_downcast=True
             )
Author: persistforever, Project: sentenceEmbedding, Lines: 28, Source: affinity_matrix.py
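What this builds is a cosine-similarity affinity matrix: pairwise dot products divided by the outer product of row norms (formed via T.inv and a rank-one T.dot), then mapped from [-1, 1] into [0, 1]. The same computation in plain numpy, as a sanity check:

    import numpy as np

    data = np.random.randn(4, 3)
    norms = np.sqrt((data ** 2).sum(axis=1))
    cosine = data.dot(data.T) / np.outer(norms, norms)
    affinity = (cosine + 1) / 2  # rescale cosine similarity from [-1, 1] to [0, 1]

    assert np.allclose(np.diag(affinity), 1.0)  # every row is perfectly similar to itself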


Example 5: set_generator_update_function

def set_generator_update_function(generator_rnn_model,
                                  generator_mean_model,
                                  generator_std_model,
                                  generator_optimizer,
                                  grad_clipping):

    # input data (time length * num_samples * input_dims)
    source_data = tensor.tensor3(name='source_data',
                                 dtype=floatX)

    target_data = tensor.tensor3(name='target_data',
                                 dtype=floatX)

    # set generator input data list
    generator_input_data_list = [source_data,]

    # get generator hidden data
    hidden_data = generator_rnn_model[0].forward(generator_input_data_list, is_training=True)[0]

    # get generator output data
    output_mean_data = get_tensor_output(input=hidden_data,
                                         layers=generator_mean_model,
                                         is_training=True)
    output_std_data = get_tensor_output(input=hidden_data,
                                        layers=generator_std_model,
                                        is_training=True)

    generator_cost  = -0.5*tensor.inv(2.0*tensor.sqr(output_std_data))*tensor.sqr(output_mean_data-target_data)
    generator_cost += -0.5*tensor.log(2.0*tensor.sqr(output_std_data)*numpy.pi)

    # set generator update
    generator_updates_cost = generator_cost.mean()
    generator_updates_dict = get_model_updates(layers=generator_rnn_model+generator_mean_model+generator_std_model,
                                               cost=generator_updates_cost,
                                               optimizer=generator_optimizer,
                                               use_grad_clip=grad_clipping)

    gradient_dict  = get_model_gradients(generator_rnn_model+generator_mean_model+generator_std_model, generator_updates_cost)
    gradient_norm  = 0.
    for grad in gradient_dict:
        gradient_norm += tensor.sum(grad**2)
    # take the square root once, after all squared gradients are accumulated
    # (compare Example 6, where this line sits outside the loop)
    gradient_norm  = tensor.sqrt(gradient_norm)

    # set generator update inputs
    generator_updates_inputs  = [source_data,
                                 target_data,]

    # set generator update outputs
    generator_updates_outputs = [generator_cost, gradient_norm]

    # set generator update function
    generator_updates_function = theano.function(inputs=generator_updates_inputs,
                                                 outputs=generator_updates_outputs,
                                                 updates=generator_updates_dict,
                                                 on_unused_input='ignore')

    return generator_updates_function
Author: taesupkim, Project: ift6266h16, Lines: 57, Source: gf_lstm_model_0.py


Example 6: set_generator_update_function

def set_generator_update_function(
    generator_rnn_model, generator_mean_model, generator_std_model, generator_optimizer, grad_clipping
):

    # input data (time length * num_samples * input_dims)
    source_data = tensor.tensor3(name="source_data", dtype=floatX)

    target_data = tensor.tensor3(name="target_data", dtype=floatX)

    # set generator input data list
    generator_input_data_list = [source_data]

    # get generator hidden data
    hidden_data = generator_rnn_model[0].forward(generator_input_data_list, is_training=True)[0]
    hidden_data = hidden_data.dimshuffle(0, 2, 1, 3).flatten(3)

    # get generator output data
    output_mean_data = get_tensor_output(input=hidden_data, layers=generator_mean_model, is_training=True)
    # output_std_data = get_tensor_output(input=hidden_data,
    #                                     layers=generator_std_model,
    #                                     is_training=True)
    output_std_data = 0.22
    # get generator cost (time_length x num_samples x hidden_size)
    generator_cost = 0.5 * tensor.inv(2.0 * tensor.sqr(output_std_data)) * tensor.sqr(output_mean_data - target_data)
    generator_cost += tensor.log(output_std_data) + 0.5 * tensor.log(2.0 * numpy.pi)
    generator_cost = tensor.sum(generator_cost, axis=2)

    # set generator update
    generator_updates_cost = generator_cost.mean()
    generator_updates_dict = get_model_updates(
        layers=generator_rnn_model + generator_mean_model,
        cost=generator_updates_cost,
        optimizer=generator_optimizer,
        use_grad_clip=grad_clipping,
    )

    gradient_dict = get_model_gradients(generator_rnn_model + generator_mean_model, generator_updates_cost)
    gradient_norm = 0.0
    for grad in gradient_dict:
        gradient_norm += tensor.sum(grad ** 2)
    gradient_norm = tensor.sqrt(gradient_norm)

    # set generator update inputs
    generator_updates_inputs = [source_data, target_data]

    # set generator update outputs
    generator_updates_outputs = [generator_cost, gradient_norm]

    # set generator update function
    generator_updates_function = theano.function(
        inputs=generator_updates_inputs,
        outputs=generator_updates_outputs,
        updates=generator_updates_dict,
        on_unused_input="ignore",
    )

    return generator_updates_function
Author: taesupkim, Project: ift6266h16, Lines: 57, Source: lstm_stack_model0.py


Example 7: energy_function

def energy_function(feature_data, is_train=True):
    # feature-wise std
    feature_std_inv = T.inv(T.nnet.softplus(feature_std)+1e-10)
    # energy hidden-feature
    e = softplus(T.dot(feature_data*feature_std_inv, linear_w0)+linear_b0)
    e = T.sum(-e, axis=1)
    # energy feature prior
    e += 0.5*T.sum(T.sqr(feature_std_inv)*T.sqr(feature_data-feature_mean), axis=1)
    return e
Author: taesupkim, Project: dcgan_code, Lines: 9, Source: energy_rbm_face_1.py
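The T.inv call here divides features by a learned per-feature std, with softplus(feature_std) + 1e-10 guaranteeing the std stays strictly positive whatever value the raw parameter takes. A minimal sketch of just that parameterization, with a hypothetical raw parameter vector:

    import numpy as np
    import theano
    import theano.tensor as T

    raw_std = T.vector('raw_std')  # unconstrained parameter, may be negative
    std_inv = T.inv(T.nnet.softplus(raw_std) + 1e-10)  # always finite and positive
    f = theano.function([raw_std], std_inv)

    print(f(np.array([-3.0, 0.0, 3.0], dtype=theano.config.floatX)))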


Example 8: logsum_loss

    def logsum_loss(self, n_samples, l1_regularization_strength, l2_regularization_strength):
        log_sum_loss = -tensor.sum(tensor.log(
                            tensor.sum(self.mix * tensor.inv(np.sqrt(2 * np.pi) * self.sigma) *
                                       tensor.exp(tensor.neg(tensor.sqr(self.mu - self.target_vector)) *
                                                  tensor.inv(2 * tensor.sqr(self.sigma))), axis=0)
        ))

        # tensor.abs_ keeps the absolute value inside the symbolic graph
        # (the original used np.abs, which is not a Theano op)
        l1_reg_loss = tensor.sum(tensor.abs_(self.layers.values()[0].W))
        for layer in self.layers.values()[1:]:
            l1_reg_loss += tensor.sum(tensor.abs_(layer.W))

        l2_reg_loss = tensor.sum(tensor.sqr(self.layers.values()[0].W))
        for layer in self.layers.values()[1:]:
            l2_reg_loss += tensor.sum(tensor.sqr(layer.W))

        l1_regularization = 1/n_samples * l1_regularization_strength/2 * l1_reg_loss

        l2_regularization = 1/n_samples * l2_regularization_strength/2 * l2_reg_loss

        return log_sum_loss + l1_regularization + l2_regularization
Author: jotterbach, Project: SimpleNeuralNets, Lines: 20, Source: MDN.py


Example 9: __spectral_matrix

	def __spectral_matrix(self, covariance):
		egvalues, egmatrix = T.nlinalg.eig(covariance)
		egmatrix_inv = T.nlinalg.matrix_inverse(egmatrix)
		diag_sqr_inv = T.nlinalg.alloc_diag(
			T.inv(
				T.sqrt(
					T.switch(T.eq(egvalues,0), 0.001, egvalues)
				)
			)
		)
		return egmatrix.dot(diag_sqr_inv).dot(egmatrix_inv)
Author: tkaplan, Project: MLTextParser, Lines: 11, Source: PreProcessing.py
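This is the usual eigendecomposition route to C^(-1/2): invert the square roots of the eigenvalues (clamping zeros to 0.001 first) and rotate back. A numpy check of the identity M·C·M = I, using eigh since the covariance is symmetric:

    import numpy as np

    samples = np.random.randn(50, 3)
    cov = np.cov(samples, rowvar=False)

    egvalues, egmatrix = np.linalg.eigh(cov)  # eigh: symmetric input, real output
    m = egmatrix.dot(np.diag(1.0 / np.sqrt(egvalues))).dot(egmatrix.T)

    assert np.allclose(m.dot(cov).dot(m), np.eye(3))  # m is cov^(-1/2)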


Example 10: standardize

def standardize(layer, offset, scale, shared_axes):
    """
    Convenience function for standardizing inputs by applying a fixed offset
    and scale.  This is usually useful when you want the input to your network
    to, say, have zero mean and unit standard deviation over the feature
    dimensions.  This layer allows you to include the appropriate statistics to
    achieve this normalization as part of your network, and applies them to its
    input.  The statistics are supplied as the `offset` and `scale` parameters,
    which are applied to the input by subtracting `offset` and dividing by
    `scale`, sharing dimensions as specified by the `shared_axes` argument.

    Parameters
    ----------
    layer : a :class:`Layer` instance or a tuple
        The layer feeding into this layer, or the expected input shape.
    offset : Theano shared variable, expression, or numpy array
        The offset to apply (via subtraction) to the axis/axes being
        standardized.
    scale : Theano shared variable, expression or numpy array
        The scale to apply (via division) to the axis/axes being standardized.
    shared_axes : 'auto', int or tuple of int
        The axis or axes to share the offset and scale over. If ``'auto'`` (the
        default), share over all axes except for the second: this will share
        scales over the minibatch dimension for dense layers, and additionally
        over all spatial dimensions for convolutional layers.

    Examples
    --------
    Assuming your training data exists in a 2D numpy ndarray called
    ``training_data``, you can use this function to scale input features to the
    [0, 1] range based on the training set statistics like so:

    >>> import lasagne
    >>> import numpy as np
    >>> training_data = np.random.standard_normal((100, 20))
    >>> input_shape = (None, training_data.shape[1])
    >>> l_in = lasagne.layers.InputLayer(input_shape)
    >>> offset = training_data.min(axis=0)
    >>> scale = training_data.max(axis=0) - training_data.min(axis=0)
    >>> l_std = standardize(l_in, offset, scale, shared_axes=0)

    Alternatively, to z-score your inputs based on training set statistics, you
    could set ``offset = training_data.mean(axis=0)`` and
    ``scale = training_data.std(axis=0)`` instead.
    """
    # Subtract the offset
    layer = BiasLayer(layer, -offset, shared_axes)
    # Do not optimize the offset parameter
    layer.params[layer.b].remove('trainable')
    # Divide by the scale
    layer = ScaleLayer(layer, T.inv(scale), shared_axes)
    # Do not optimize the scales parameter
    layer.params[layer.scales].remove('trainable')
    return layer
Author: ALISCIFP, Project: Segmentation, Lines: 54, Source: special.py


Example 11: test_dnn_batchnorm_train

def test_dnn_batchnorm_train():
    if not dnn.dnn_available(test_ctx_name):
        raise SkipTest(dnn.dnn_available.msg)
    if dnn.version(raises=False) < 5000:
        raise SkipTest("batch normalization requires cudnn v5+")
    utt.seed_rng()

    for mode in ('per-activation', 'spatial'):
        for vartype in (T.ftensor4, T.ftensor3, T.fmatrix, T.fvector):
            x, scale, bias = (vartype(n) for n in ('x', 'scale', 'bias'))
            ndim = x.ndim
            eps = 5e-3  # some non-standard value to test if it's used

            # forward pass
            out, x_mean, x_invstd = dnn.dnn_batch_normalization_train(
                x, scale, bias, mode, eps)
            # reference forward pass
            if mode == 'per-activation':
                axes = (0,)
            elif mode == 'spatial':
                axes = (0,) + tuple(range(2, ndim))
            x_mean2 = x.mean(axis=axes, keepdims=True)
            x_invstd2 = T.inv(T.sqrt(x.var(axis=axes, keepdims=True) + eps))
            scale2 = T.addbroadcast(scale, *axes)
            bias2 = T.addbroadcast(bias, *axes)
            out2 = (x - x_mean2) * (scale2 * x_invstd2) + bias2
            # backward pass
            dy = vartype('dy')
            grads = T.grad(None, wrt=[x, scale, bias], known_grads={out: dy})
            # reference backward pass
            grads2 = T.grad(None, wrt=[x, scale, bias], known_grads={out2: dy})
            # compile
            f = theano.function([x, scale, bias, dy],
                                [out, x_mean, x_invstd, out2, x_mean2, x_invstd2] +
                                grads + grads2, mode=mode_with_gpu)
            # run
            for data_shape in ((10, 20, 30, 40), (4, 3, 1, 1), (1, 1, 5, 5)):
                data_shape = data_shape[:ndim]
                param_shape = tuple(1 if d in axes else s
                                    for d, s in enumerate(data_shape))
                X = 4 + 3 * numpy.random.randn(*data_shape).astype('float32')
                Dy = -1 + 2 * numpy.random.randn(*data_shape).astype('float32')
                Scale = numpy.random.randn(*param_shape).astype('float32')
                Bias = numpy.random.randn(*param_shape).astype('float32')
                outputs = f(X, Scale, Bias, Dy)
                # compare outputs
                utt.assert_allclose(outputs[0], outputs[0 + 3])  # out
                utt.assert_allclose(outputs[1], outputs[1 + 3])  # mean
                utt.assert_allclose(outputs[2], outputs[2 + 3])  # invstd
                # compare gradients
                utt.assert_allclose(outputs[6], outputs[6 + 3])  # dx
                utt.assert_allclose(outputs[7], outputs[7 + 3], rtol=3e-3)  # dscale
                utt.assert_allclose(outputs[8], outputs[8 + 3])  # dbias
Author: nke001, Project: Theano, Lines: 53, Source: test_dnn.py


Example 12: get_symbolic_thermal_hmm_params

    def get_symbolic_thermal_hmm_params(log_prior_c: types.TheanoVector,
                                        log_trans_tcc: types.TheanoTensor3,
                                        log_emission_tc: types.TheanoMatrix,
                                        temperature: tt.scalar):
        inv_temperature = tt.inv(temperature)

        thermal_log_prior_c = inv_temperature * log_prior_c
        thermal_log_prior_c -= pm.math.logsumexp(thermal_log_prior_c)
        thermal_log_trans_tcc = inv_temperature * log_trans_tcc
        thermal_log_trans_tcc -= pm.math.logsumexp(thermal_log_trans_tcc, axis=-1)
        thermal_log_emission_tc = inv_temperature * log_emission_tc

        return thermal_log_prior_c, thermal_log_trans_tcc, thermal_log_emission_tc
Author: broadinstitute, Project: gatk, Lines: 13, Source: theano_hmm.py
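Tempering divides every log-probability by the temperature and renormalizes, so the prior and transition rows stay valid distributions; at temperature > 1 they flatten toward uniform. A scalar numpy illustration of the same renormalization:

    import numpy as np

    log_p = np.log(np.array([0.2, 0.8]))
    temperature = 2.0

    thermal = log_p / temperature             # plays the role of tt.inv(temperature) * log_p
    thermal -= np.log(np.exp(thermal).sum())  # logsumexp renormalization

    print(np.exp(thermal))  # ~[0.333, 0.667]: flatter than the original [0.2, 0.8]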


Example 13: predict

    def predict(self, X1, y1, X2):
   
        cov_train = self.compute_cov_s(X1,self.N)
        cov_test  = self.compute_cov_s(X2,self.M)
        cov_te_tr = self.compute_cov(X1,X2,self.N,self.M)     
        cov_tr_te = cov_te_tr.T

        # T.inv is the element-wise reciprocal, not a matrix inverse; the GP
        # posterior requires the true inverse of the noisy covariance matrix
        arg0  = T.nlinalg.matrix_inverse(cov_train + self.noise**2 * T.identity_like(cov_train))
        arg1  = T.dot(cov_te_tr, arg0)
        mu    = T.dot(arg1,y1)
        sigma = cov_test - T.dot(arg1, cov_tr_te) 

        return mu,T.diag(sigma)
Author: Scott-Alex, Project: ML_demo, Lines: 14, Source: gp_regression.py
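Even with a true matrix inverse, explicitly forming K^(-1) is usually avoided: a linear solve against the noisy covariance is cheaper and numerically safer. A hedged sketch of that alternative, with hypothetical symbolic placeholders for the quantities above:

    import theano.tensor as T
    from theano.tensor.slinalg import solve

    K = T.matrix('K')            # stands in for cov_train + noise**2 * identity
    y = T.vector('y')            # training targets
    K_star = T.matrix('K_star')  # stands in for cov_te_tr

    alpha = solve(K, y)    # computes K^(-1) y without forming the inverse
    mu = K_star.dot(alpha) # GP posterior mean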


Example 14: logp

def logp(X):
    '''
    log-probability under many Gaussians
    '''
#    print(X.shape.eval(), mu.shape.eval())
    err = T.reshape(X, (-1,2)) - T.reshape(mu, (-1,2))  # shaped as (n*m,2)

    # the original comment indicates a matrix inverse is intended; T.inv would be
    # the element-wise reciprocal, so use the linear-algebra op instead
    S = T.nlinalg.matrix_inverse(cov)

    E = (T.reshape(err, (-1, 2, 1)) *
         S *
         T.reshape(err, (-1, 1, 2))
         ).sum()

    return - E / 2
Author: sebalander, Project: sebaPhD, Lines: 15, Source: intrinsicCalibFullMH.py


Example 15: _whiten_input

    def _whiten_input(self, n): 
        X = T.matrix('X', dtype=theano.config.floatX)

        cov = T.dot(X.T, X) / (n - 1)
        
        eigenvalues, eigenvectors = T.nlinalg.eig(cov)

        V = eigenvectors
        D = eigenvalues
        D_prime = T.nlinalg.alloc_diag(T.inv(T.sqrt(D + self.e_zca)))
                
        M = T.dot(V, T.dot(D_prime, V.T))

        # now the input has been rotated: each column is a sample
        return theano.function(inputs=[X], outputs=T.dot(M, X.T))
Author: bachard, Project: 2015-DL-practicalcourse, Lines: 15, Source: kmeans.py


Example 16: addFullBNLayerTrain

def addFullBNLayerTrain(x,gamma,beta, mean=None, var=None):
    fsize = gamma.get_value().shape[0]
    ep = 1e-5
    momentum = 0.9
    if mean is None:
        mean = theano.shared(np.zeros((fsize,)))
        var = theano.shared(np.ones((fsize,)))
    input_mean = T.mean(x, axis=0)
    input_var = T.var(x, axis=0)
    inv_std = T.inv(T.sqrt(input_var + ep))
    
    updates = []
    updates.append((mean, momentum*mean+(1-momentum)*input_mean))
    updates.append((var,momentum*var+(1-momentum)*(x.shape[0]/(x.shape[0]-1)*input_var)))

    o = (x-input_mean) * gamma * inv_std + beta
    return o, mean, var, updates
Author: tyhu, Project: PyAI, Lines: 17, Source: compact_cnn.py
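Note the x.shape[0]/(x.shape[0]-1) factor in the running-variance update: the batch variance from T.var is biased, and this rescaling turns it into the unbiased estimate before averaging. The correction in plain numpy:

    import numpy as np

    x = np.random.randn(8, 3)
    n = x.shape[0]

    # biased batch variance times n/(n-1) equals the unbiased (ddof=1) variance
    assert np.allclose(x.var(axis=0) * n / (n - 1), x.var(axis=0, ddof=1))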


Example 17: set_generator_evaluation_function

def set_generator_evaluation_function(generator_rnn_model,
                                      generator_mean_model,
                                      generator_std_model):

    # input data (time length * num_samples * input_dims)
    source_data = tensor.tensor3(name='source_data',
                                 dtype=floatX)

    target_data = tensor.tensor3(name='target_data',
                                 dtype=floatX)

    # set generator input data list
    generator_input_data_list = [source_data,]

    # get generator hidden data
    hidden_data = generator_rnn_model[0].forward(generator_input_data_list, is_training=True)[0]
    hidden_data = hidden_data.dimshuffle(0, 2, 1, 3)
    hidden_data = hidden_data[:,:,-1,:].flatten(3)

    # get generator output data
    output_mean_data = get_tensor_output(input=hidden_data,
                                         layers=generator_mean_model,
                                         is_training=True)
    output_std_data = get_tensor_output(input=hidden_data,
                                        layers=generator_std_model,
                                        is_training=True)
    # output_std_data = 0.22
    # get generator cost (time_length x num_samples x hidden_size)
    generator_cost  = 0.5*tensor.inv(2.0*tensor.sqr(output_std_data))*tensor.sqr(output_mean_data-target_data)
    generator_cost += tensor.log(output_std_data) + 0.5*tensor.log(2.0*numpy.pi)
    generator_cost  = tensor.sum(generator_cost, axis=2)

    # set generator evaluate inputs
    generator_evaluate_inputs  = [source_data,
                                  target_data,]

    # set generator evaluate outputs
    generator_evaluate_outputs = [generator_cost,]

    # set generator evaluate function
    generator_evaluate_function = theano.function(inputs=generator_evaluate_inputs,
                                                  outputs=generator_evaluate_outputs,
                                                  on_unused_input='ignore')

    return generator_evaluate_function
Author: taesupkim, Project: ift6266h16, Lines: 45, Source: gf_rnn_model_0.py


Example 18: fprop

    def fprop(X, test):
        btest = tensor.lt(0, test)

        X_means = X.mean([0, 2, 3])
        X_inv_stds = tensor.inv(tensor.sqrt(X.var([0, 2, 3])) + epsilon)

        means_clone = theano.clone(means, share_inputs = False)
        inv_stds_clone = theano.clone(inv_stds, share_inputs = False)

        means_clone.default_update = ifelse(btest, means, lerp(means, X_means, alpha))
        inv_stds_clone.default_update = ifelse(btest, inv_stds, lerp(inv_stds, X_inv_stds, alpha))
    
        X_means += 0 * means_clone
        X_inv_stds += 0 * inv_stds_clone

        X_means = ifelse(btest, means, X_means)
        X_inv_stds = ifelse(btest, inv_stds, X_inv_stds)

        return (X - ds(X_means)) * ds(X_inv_stds) * ds(gammas)
Author: ciechowoj, Project: nn_project, Lines: 19, Source: layers.py


Example 19: _build

    def _build(self, input_tensor):
        self._instantiate_parameters(
            input_tensor.shape, input_tensor.dtype)

        input_tensor_ = input_tensor.unwrap()

        mean_acc = self.get_parameter_variable('mean').unwrap()
        var_acc = self.get_parameter_variable('var').unwrap()
        scale = self.get_parameter_variable('scale').unwrap()
        offset = self.get_parameter_variable('offset').unwrap()

        if self.args['learn']:
            decay = self.args['decay']
            mean_in = input_tensor_.mean(axis=self._axes)
            var_in = input_tensor_.var(self._axes)

            new_mean_acc = decay * mean_acc + (1 - decay) * mean_in
            new_var_acc = decay * var_acc + (1 - decay) * var_in

            self._update_operations.append(
                wrapper.Operation(
                    op={mean_acc: new_mean_acc},
                    name='update_mean',
                )
            )
            self._update_operations.append(
                wrapper.Operation(
                    op={var_acc: new_var_acc},
                    name='update_var',
                )
            )

            mean_acc = new_mean_acc
            var_acc = new_var_acc

        mean_acc = mean_acc.dimshuffle(self._pattern)
        var_acc = var_acc.dimshuffle(self._pattern)
        scale = scale.dimshuffle(self._pattern)
        offset = offset.dimshuffle(self._pattern)

        stdi = T.inv(T.sqrt(var_acc + self.args['epsilon']))
        output = scale * (input_tensor_ - mean_acc) * stdi + offset
        return wrapper.Tensor(output, shape=input_tensor.shape, name='output')
Author: mthrok, Project: luchador, Lines: 43, Source: normalization.py


Example 20: NLL

def NLL(sigma, mixing, y):
    """Computes the mean of negative log likelihood for P(y|x)
    
    y = T.matrix('y') # (minibatch_size, output_size)
    mu = T.tensor3('mu') # (minibatch_size, output_size, n_components)
    sigma = T.matrix('sigma') # (minibatch_size, n_components)
    mixing = T.matrix('mixing') # (minibatch_size, n_components)

    """

    # multivariate Gaussian
    exponent = -0.5 * T.inv(sigma) * T.sum(y ** 2, axis=1)
    normalizer = 2 * np.pi * sigma
    exponent = exponent + T.log(mixing) - (y.shape[1] * 0.5) * T.log(normalizer)
    max_exponent = T.max(exponent, axis=1)
    mod_exponent = exponent - max_exponent[:, None]
    gauss_mix = T.sum(T.exp(mod_exponent), axis=1)
    log_gauss = max_exponent + T.log(gauss_mix)
    res = -T.mean(log_gauss)
    return res
Author: markstoehr, Project: structured_gaussian_mixtures, Lines: 20, Source: mdn.py
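The max_exponent subtraction is the standard log-sum-exp stabilization: log sum_i exp(e_i) = m + log sum_i exp(e_i - m) with m = max_i e_i, which avoids underflow when all exponents are very negative. A plain numpy illustration:

    import numpy as np

    e = np.array([-1000.0, -1001.0, -1002.0])

    naive = np.log(np.exp(e).sum())           # exp underflows to 0, log gives -inf
    m = e.max()
    stable = m + np.log(np.exp(e - m).sum())  # ~ -999.59

    print(naive, stable)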



Note: The theano.tensor.inv examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are taken from open-source projects contributed by their original authors; copyright remains with those authors, and any use or redistribution should follow each project's license. Do not repost without permission.

