Python tensor.nonzero Function Code Examples


This article collects typical usage examples of the Python function theano.tensor.nonzero. If you are wondering what nonzero does, how to call it, or what real-world code that uses it looks like, the curated examples below should help.



The following sections present 20 code examples of the nonzero function, drawn from open-source projects and ordered roughly by popularity.
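Before the examples, here is a minimal sketch of the function itself (assuming a standard Theano installation). Like numpy.nonzero, T.nonzero returns a tuple of index tensors, one per dimension, which is why many of the examples below take element [0] of the result or index with the full tuple:

    import numpy as np
    import theano
    import theano.tensor as T

    x = T.vector('x')
    idx = T.nonzero(x)[0]       # indices of the non-zero entries (1-d case)
    selected = x[T.nonzero(x)]  # indexing with the full tuple also works

    f = theano.function([x], [idx, selected])
    print(f(np.array([0., 3., 0., 5.], dtype=theano.config.floatX)))
    # -> [array([1, 3]), array([3., 5.])]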

Example 1: nll2

	def nll2(self, y):
		# for predicting whether a course is taken
		return -T.mean(
				T.log(self.output)[T.nonzero(y)]
			) - T.mean(
				T.log(1 - self.output)[T.nonzero(1 - y)]
			) 
Author: mufan-li | Project: sg | Lines: 7 | Source: nnet2.py
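The expression above indexes the log-probabilities only at the positions where the label is set, so each mean runs over just the relevant entries. A self-contained sketch of compiling such a masked negative log-likelihood (output and y below are illustrative stand-ins, not names from the original project):

    import numpy as np
    import theano
    import theano.tensor as T

    output = T.matrix('output')  # stand-in for a network's sigmoid output
    y = T.matrix('y')            # 0/1 target matrix
    nll = -T.mean(T.log(output)[T.nonzero(y)]) \
          - T.mean(T.log(1 - output)[T.nonzero(1 - y)])

    f = theano.function([output, y], nll)
    p = np.array([[0.9, 0.2], [0.1, 0.8]], dtype=theano.config.floatX)
    t = np.array([[1., 0.], [0., 1.]], dtype=theano.config.floatX)
    print(f(p, t))  # small, since the predictions match the targets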


Example 2: __init__

    def __init__(self, rng, batchsize, epochs=100, alpha=0.001, beta1=0.9, beta2=0.999, eps=1e-08, l1_weight=0.0, l2_weight=0.1, cost='mse'):
        self.alpha = alpha
        self.beta1 = beta1
        self.beta2 = beta2
        self.eps = eps
        self.l1_weight = l1_weight
        self.l2_weight = l2_weight
        self.rng = rng
        self.theano_rng = RandomStreams(rng.randint(2 ** 30))
        self.epochs = epochs
        self.batchsize = batchsize

        # Here, cost is always the cost minimised in supervised training;
        # the T.nonzero term ensures that the cost is only calculated for examples with a label.
        #
        # Convention: we mark unlabelled examples with a vector of zeros in lieu of a one-hot vector
        if   cost == 'mse':
            self.y_pred = lambda network, x: network(x)
            self.error = lambda network, y_pred, y: T.zeros((1,))
            # mean squared error over the labelled entries only
            self.cost = lambda network, x, y: T.mean((network(x)[T.nonzero(y)] - y[T.nonzero(y)])**2)
        elif cost == 'binary_cross_entropy':
            self.y_pred = lambda network, x: network(x)
            self.cost   = lambda network, y_pred, y: T.nnet.binary_crossentropy(y_pred[T.nonzero(y)], y[T.nonzero(y)]).mean()
            # classification error
            self.error  = lambda network, y_pred, y: T.mean(T.neq(T.argmax(y_pred, axis=1), T.argmax(y, axis=1)))
        elif cost == 'cross_entropy':
            self.y_pred = lambda network, x: network(x)
            self.cost   = lambda network, y_pred, y: T.nnet.categorical_crossentropy(y_pred[T.nonzero(y)], y[T.nonzero(y)]).mean()
            # classification error
            self.error  = lambda network, y_pred, y: T.mean(T.neq(T.argmax(y_pred, axis=1), T.argmax(y, axis=1)))
        else:
            self.y_pred = lambda network, x: network(x)
            self.error = lambda network, y_pred, y: T.zeros((1,))
            self.cost = cost
Author: orangeduck | Project: deep-motion-analysis | Lines: 34 | Source: AdamTrainer.py


Example 3: categorical_crossentropy_segm

def categorical_crossentropy_segm(prediction_proba, targets):
    '''
    MODIFICATIONS:
        - reshape from image-size to array and back
    '''
    shape = T.shape(prediction_proba)
    pred_mod1 = T.transpose(prediction_proba, (0,2,3,1))
    pred_mod = T.reshape(pred_mod1, (-1,shape[1]))
    if prediction_proba.ndim == targets.ndim:
        targ_mod1 = T.transpose(targets,(0,2,3,1))
        targ_mod = T.reshape(targ_mod1,(-1,shape[1]))
    else:
        targ_mod = T.reshape(targets, (-1,))
    results = categorical_crossentropy(pred_mod, targ_mod)
    results = T.reshape(results, (shape[0], shape[2], shape[3]))

    # QUICK IMPLEMENTATION FOR TWO SPECIFIC CLASSES. NEEDS GENERALIZATION
    # Weights depending on class occurrence:
    weights = (1.02275, 44.9647)
    cars_indx, not_cars_indx = T.nonzero(targets), T.nonzero(T.eq(targets, 0))
    # T.set_subtensor returns a new variable, so the result must be reassigned
    results = T.set_subtensor(results[cars_indx], results[cars_indx] * float32(weights[1]))
    results = T.set_subtensor(results[not_cars_indx], results[not_cars_indx] * float32(weights[0]))


    return T.sum(results, axis=(1,2))
Author: abailoni | Project: greedy_CNN | Lines: 29 | Source: segm_utils.py
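One pitfall around the example above: T.set_subtensor is functional rather than in-place, so its result must be captured. A minimal demonstration:

    import numpy as np
    import theano
    import theano.tensor as T

    v = T.vector('v')
    idx = T.nonzero(v)[0]
    scaled = T.set_subtensor(v[idx], v[idx] * 10)  # returns a new variable

    f = theano.function([v], scaled)
    print(f(np.array([0., 1., 0., 2.], dtype=theano.config.floatX)))
    # -> [ 0. 10.  0. 20.]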


Example 4: unet_crossentropy_loss_sampled

def unet_crossentropy_loss_sampled(y_true, y_pred):
    print 'unet_crossentropy_loss_sampled'
    epsilon = 1.0e-4
    y_pred_clipped = T.flatten(T.clip(y_pred, epsilon, 1.0-epsilon))
    y_true = T.flatten(y_true)
    # this seems to work
    # it is super ugly though and I am sure there is a better way to do it
    # but I am struggling with theano to cooperate
    # filter the right indices
    indPos = T.nonzero(y_true)[0]  # T.nonzero returns a tuple of index arrays (one per dimension), like np.nonzero
    indNeg = T.nonzero(1-y_true)[0]
    # shuffle
    n = indPos.shape[0]
    indPos = indPos[srng.permutation(n=n)]
    n = indNeg.shape[0]
    indNeg = indNeg[srng.permutation(n=n)]
    # take equal number of samples depending on which class has less
    n_samples = T.cast(T.min([T.sum(y_true), T.sum(1-y_true)]), dtype='int64')

    indPos = indPos[:n_samples]
    indNeg = indNeg[:n_samples]
    loss_vector = -T.mean(T.log(y_pred_clipped[indPos])) - T.mean(T.log(1-y_pred_clipped[indNeg]))
    average_loss = T.mean(loss_vector)
    print 'average_loss:', average_loss
    return average_loss
Author: Rhoana | Project: icon | Lines: 25 | Source: oldunet2.py
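The srng used above is assumed to be a Theano RandomStreams instance defined elsewhere in the module. A hedged sketch of the shuffle idiom in isolation:

    import numpy as np
    import theano
    import theano.tensor as T
    from theano.tensor.shared_randomstreams import RandomStreams

    srng = RandomStreams(seed=42)
    y = T.vector('y')
    ind_pos = T.nonzero(y)[0]
    shuffled = ind_pos[srng.permutation(n=ind_pos.shape[0])]  # random order of the positive indices

    f = theano.function([y], shuffled)
    print(f(np.array([1., 0., 1., 1., 0.], dtype=theano.config.floatX)))
    # -> some permutation of [0, 2, 3]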


Example 5: prepare_loss

def prepare_loss(inputlayer, outlayer, pairs, types, loss_function,
                 entropy_penalty=0, V=None, lamb=-1, train_pass=False):
    # reshape to 2d before sending through the network,
    # after which the original shape is recovered
    output = outlayer.output(
        {inputlayer: pairs.reshape((-1, pairs.shape[-1]))},
        train_pass=train_pass).reshape((pairs.shape[0], 2, -1))

    x1, x2 = output[:,0], output[:,1]
    cost = loss_function(x1, x2, types)
    same_loss = cost[T.nonzero(types)].mean()
    diff_loss = cost[T.nonzero(1 - types)].mean()

    if lamb >= 0:
        cost = 1 / (lamb + 1) * same_loss + lamb / (lamb + 1) * diff_loss
    else:
        cost = cost.mean()

    ent = entropy_loss(x1, x2)
    total_cost = cost + entropy_penalty * ent

    if V is not None:
        return total_cost, cost, same_loss, diff_loss, ent, calculate_spread(V)
    else:
        return total_cost, cost, same_loss, diff_loss, ent
Author: arvidfm | Project: masters-thesis | Lines: 25 | Source: train.py


Example 6: past_weight_grad_step

def past_weight_grad_step(xs, es, kp_x, kd_x, kp_e, kd_e, shape, dws=None):
    """
    Do an efficient update of the weights given the two spike-update.

    (This still runs FING SLOWLY!)

    :param xs: An (n_in) vector
    :param es: An (n_out) vector
    :param kp_x:
    :param kd_x:
    :param kp_e:
    :param kd_e:
    :param shape: (n_in, n_out)
    :return:
    """
    kp_x, kd_x, kp_e, kd_e = [as_floatx(k) for k in (kp_x, kd_x, kp_e, kd_e)]
    n_in, n_out = shape
    rx = kd_x/(kp_x+kd_x)
    re = kd_e/(kp_e+kd_e)

    tx_last = create_shared_variable(np.zeros(n_in)+1)
    te_last = create_shared_variable(np.zeros(n_out)+1)
    x_last = create_shared_variable(np.zeros(n_in))
    e_last = create_shared_variable(np.zeros(n_out))
    x_spikes = tt.neq(xs, 0)
    e_spikes = tt.neq(es, 0)
    x_spike_ixs, = tt.nonzero(x_spikes)
    e_spike_ixs, = tt.nonzero(e_spikes)

    if dws is None:
        dws = tt.zeros(shape)

    t_last = tt.minimum(tx_last[x_spike_ixs, None], te_last)  # (n_x_spikes, n_out)
    dws = tt.inc_subtensor(dws[x_spike_ixs, :], x_last[x_spike_ixs, None]*e_last
        * rx**(tx_last[x_spike_ixs, None]-t_last)
        * re**(te_last[None, :]-t_last)
        * geoseries_sum(re*rx, t_end=t_last, t_start=1)
        )

    new_x_last = tt.set_subtensor(x_last[x_spike_ixs], x_last[x_spike_ixs]*rx**tx_last[x_spike_ixs]+ xs[x_spike_ixs]/as_floatx(kd_x))
    new_tx_last = tt.switch(x_spikes, 0, tx_last)

    t_last = tt.minimum(new_tx_last[:, None], te_last[e_spike_ixs])  # (n_in, n_e_spikes)
    dws = tt.inc_subtensor(dws[:, e_spike_ixs], new_x_last[:, None]*e_last[e_spike_ixs]
        * rx**(new_tx_last[:, None]-t_last)
        * re**(te_last[None, e_spike_ixs]-t_last)
        * geoseries_sum(re*rx, t_end=t_last, t_start=1)
        )

    add_update(x_last, new_x_last)
    add_update(e_last, tt.set_subtensor(e_last[e_spike_ixs], e_last[e_spike_ixs]*re**te_last[e_spike_ixs]+ es[e_spike_ixs]/as_floatx(kd_e)))
    add_update(tx_last, new_tx_last+1)
    add_update(te_last, tt.switch(e_spikes, 1, te_last+1))
    return dws
Author: petered | Project: pdnn-test | Lines: 54 | Source: pdnnet.py


Example 7: logp_theano_comorbidities

def logp_theano_comorbidities(logLike,nObs,B0,B,X,S,T):
        logLike = 0.0

        #Unwrap t=0 points for B0
        zeroIndices = np.roll(T.cumsum(),1)
        zeroIndices[0] = 0
        zeroIndices = zeroIndices.astype('int32')

        #import pdb; pdb.set_trace()

        #Likelihood from B0 for X=1 and X=0 cases
        logLike += (X[zeroIndices]*TT.log(B0[:,S[zeroIndices]]).T).sum()
        logLike += ((1-X[zeroIndices])*TT.log(1.-B0[:,S[zeroIndices]]).T).sum()

        stateChange = S[1:]-S[:-1]
        # Don't consider t=0 points
        #import pdb; pdb.set_trace()
        #setZero = TT.as_tensor_variable(zeroIndices[1:]-1)
        #TT.set_subtensor(stateChange[setZero],0)
        stateChange = TT.set_subtensor(stateChange[zeroIndices[1:]-1],0)
        #stateChange[setZero] = 0
        #stateChange[zeroIndices[1:]-1] = 0
        changed = TT.nonzero(stateChange)[0]+1

        #import pdb; pdb.set_trace()

        # A change can only happen from 0 to 1 given our assumptions
        logLike += ((X[changed]-X[changed-1])*TT.log(B[:,S[changed]]).T).sum()
        logLike += (((1-X[changed])*(1-X[changed-1]))*TT.log(1.-B[:,S[changed]]).T).sum()
        #logLike += (X[changed]*np.log(B[:,S[changed]]).T).sum()

        return logLike
Author: clinicalml | Project: ContinuousTimeMarkovModel | Lines: 33 | Source: distributions.py


Example 8: train_batch

    def train_batch(self, batch_size):
        T = self.AE.T  # note: T here is the sparse ratings matrix, not theano.tensor
        T = T.tocsr()
        nonzero_indices = T.nonzero()  # scipy.sparse .nonzero(): a (row_indices, col_indices) pair
        #pdb.set_trace()
        n_users = len(np.unique(nonzero_indices[0]))
        indices = np.unique(nonzero_indices[0])
        for epoch in xrange(self.epochs):
            l = []
            for ind, i in enumerate(xrange(0, n_users, batch_size)):
                # CHECK : SEEMS BUGGY. 
                #------------------------
                #ratings = T[indices[i:(i + batch_size)], :].toarray().astype(np.float32)
                ratings = T[indices[i:(i + batch_size)], :].toarray().astype(np.float32)

                #------------------------
                #print ratings
                #pdb.set_trace()
                loss = self.AE.ae_batch(ratings)
                #loss = self.AE.debug(ratings)
                #print loss
                #pdb.set_trace()
                l.append(loss)
            m = np.mean(np.array(l))
            print("mean Loss for epoch %d  batch %d is %f"%(epoch, ind, m))
            rmse = self.RMSE_sparse()
            print("RMSE after one epoch is %f"%(rmse))
            f.write(str(rmse) + '\n')
Author: shashankg7 | Project: trust-inference-autoencoder | Lines: 28 | Source: training_advagato.py


Example 9: exp

    def exp(self, X, U):
        norm_U = tensor.sqrt(tensor.sum((U ** 2), axis=0)).reshape((1, self._n))

        Y = X * tensor.cos(norm_U) + U * (tensor.sin(norm_U) / norm_U)

        # For those columns where the step is too small, use a retraction.
        exclude = tensor.nonzero(norm_U <= 4.5e-8)[-1]
        # TensorVariables do not support in-place assignment; use set_subtensor instead
        Y = tensor.set_subtensor(Y[:, exclude], self._normalize_columns(X[:, exclude] + U[:, exclude]))

        return Y
Author: Nehoroshiy | Project: theano_manifold | Lines: 10 | Source: oblique.py
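Here T.nonzero is applied to a boolean condition rather than to raw data, yielding the indices where the condition holds. A small sketch of that idiom (threshold value illustrative):

    import numpy as np
    import theano
    import theano.tensor as T

    norms = T.vector('norms')
    small = T.nonzero(norms <= 1e-8)[-1]  # [-1]: the last (here, only) index array of the tuple

    f = theano.function([norms], small)
    print(f(np.array([1., 0., 2., 1e-12], dtype=theano.config.floatX)))
    # -> [1 3]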


Example 10: MASK_blanking

def MASK_blanking(x_i):
    # Find indices of the first and last non-zero values in x_i
    idxs = T.nonzero(x_i)[0][[1, -1]]
    # Their difference = the number of non-zero values
    no_values = idxs[1] - idxs[0]
    # Move each index inwards by a proportion of the number of values
    idxs0 = T.cast(T.floor(idxs[0] + no_values * blank_proportion), 'int32')
    idxs1 = T.cast(T.floor(idxs[1] - no_values * blank_proportion), 'int32')
    # Return a vector that has a tighter mask than x_i
    return T.set_subtensor(T.zeros_like(x_i)[idxs0:idxs1], T.alloc(1., idxs1-idxs0))
Author: LarsHH | Project: reconstructionAE | Lines: 10 | Source: non_fixed_crops_vae.py


Example 11: add_synap_post_inp

 def add_synap_post_inp(i,po,p,s,q):
     # i:: sequence
     # po:: post
     # p:: pre
     # s:: dA
     # q:: W
     index = T.nonzero(q[:self.Ne,i])
     npo = T.inc_subtensor(po[index,i],s)
     nw = T.inc_subtensor(q[:,i],p[:,i])
     nw = T.clip(nw,0,self.wmax)
     return {po:npo,q:nw}
Author: veinpy | Project: SNN_theano | Lines: 11 | Source: snn_1st.py


Example 12: unet_crossentropy_loss_sampled

def unet_crossentropy_loss_sampled(y_true, y_pred):
    epsilon = 1.0e-4
    y_pred_clipped = T.flatten(T.clip(y_pred, epsilon, 1.0-epsilon))
    y_true = T.flatten(y_true)
    # this seems to work
    # it is super ugly though and I am sure there is a better way to do it
    # but I am struggling with theano to cooperate
    # filter the right indices
    indPos = T.nonzero(y_true)[0]  # T.nonzero returns a tuple of index arrays (one per dimension), like np.nonzero
    indNeg = T.nonzero(1-y_true)[0]
    # shuffle
    n = indPos.shape[0]
    indPos = indPos[srng.permutation(n=n)]
    n = indNeg.shape[0]
    indNeg = indNeg[srng.permutation(n=n)]
    # subset assuming each class has at least 100 samples present
    indPos = indPos[:200]
    indNeg = indNeg[:200]
    loss_vector = -T.mean(T.log(y_pred_clipped[indPos])) - T.mean(T.log(1-y_pred_clipped[indNeg]))
    average_loss = T.mean(loss_vector)
    return average_loss
Author: cometyang | Project: network-evaluations | Lines: 21 | Source: unet_multiChannel.py


Example 13: fprop_step

        def fprop_step(state_below, index, state_before, W, U, b):

            state_now = state_before.copy()
            index = self.num_modules -\
                tensor.nonzero(tensor.mod(index+1, self.M))[0].shape[0]
            this_range = index * self.module_dim
            z = tensor.dot(state_below, W[:, :this_range]) +\
                tensor.dot(state_before, U[:, :this_range]) +\
                b[:this_range]
            z = tensor.tanh(z)
            state_now = tensor.set_subtensor(state_now[:, :this_range], z)

            return state_now
Author: zhangmeishan | Project: pylearn2 | Lines: 13 | Source: rnn.py


Example 14: add_synap_pre_inp

        def add_synap_pre_inp(i,p,po,s,q):
            # i :: sequence
            # p :: pre | post
            # s :: dApre | dApost
            # q :: W
            index = T.nonzero(q[i,:self.Ne])
            np = T.inc_subtensor(p[i,index],s)
##            tmp = p[i,:]
##            tmp=T.inc_subtensor(tmp[index],s)
##            np=T.set_subtensor(p[i,:],tmp)
            #np = T.inc_subtensor(p[i,:],s)
            nw = T.inc_subtensor(q[i,:],po[i,:])
            nw=T.clip(nw,0,self.wmax)
            return {p:np,q:nw}
Author: veinpy | Project: SNN_theano | Lines: 14 | Source: snn_1st.py


Example 15: train_batch

 def train_batch(self, batch_size):
     T = self.AE.T
     T = T.tocsr()
     nonzero_indices = T.nonzero()
     #pdb.set_trace()
     n_users = len(np.unique(nonzero_indices[0]))
     indices = np.unique(nonzero_indices[0])
     for epoch in xrange(self.epochs):
         for ind, i in enumerate(xrange(0, n_users, batch_size)):
             ratings = T[indices[i:(i + batch_size)], :].toarray().astype(np.float32)
             #print ratings
             #pdb.set_trace()
             loss = self.AE.ae_batch(ratings)
             #loss = self.AE.debug(ratings)
             print loss
Author: shashankg7 | Project: trust-inference-autoencoder | Lines: 15 | Source: training.py


Example 16: step

 def step(input_step, previous_activation, time_step, W_in, W_self, biases):
     new_activation = previous_activation.copy()
     modzero = T.nonzero(T.eq(T.mod(time_step, self.group_labels), 0))[0]
     W_in_now = T.flatten(W_in[:, modzero, :], outdim=2)
     W_self_now = T.flatten(W_self[:, modzero, :], outdim=2)
     biases_now = T.flatten(biases[modzero, :])
     activation = T.dot(input_step, W_in_now)
     activation += T.dot(previous_activation, W_self_now)
     activation += biases_now
     activation = self.activation_function(activation)
     modzero_activation_changes = (modzero * self.group_size) + (
         T.ones((modzero.shape[0], self.group_size), dtype='int32') * T.arange(self.group_size, dtype='int32')).T
     modzero_flatten = T.flatten(modzero_activation_changes).astype('int32')
     new_activation = T.set_subtensor(new_activation[:, modzero_flatten], activation)
     time_step += 1
     return new_activation, time_step
Author: ZenCCoding | Project: clockworkrnn-1 | Lines: 16 | Source: Clockwork.py
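The modzero computation above selects the indices of the groups whose clock period divides the current time step, which is the core of the clockwork-RNN update. A small sketch of that selection in isolation (the periods below are illustrative, assuming group_labels holds the clockwork periods):

    import numpy as np
    import theano
    import theano.tensor as T

    group_labels = theano.shared(np.array([1, 2, 4, 8]))  # hypothetical clock periods
    t = T.iscalar('t')
    active = T.nonzero(T.eq(T.mod(t, group_labels), 0))[0]

    f = theano.function([t], active)
    print(f(4))  # -> [0 1 2]: the groups with periods 1, 2 and 4 fire at t=4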


Example 17: step

        def step(i, in_mask, ACT, ACT_, in_se, WT):
            sub_tree_idx_ = T.nonzero(WT[:, i, :] > -1)
            a_ = T.dot(in_se[:, i], self.WSM)  # + self.b
            if self.b is not None:
                a_ += self.b.dimshuffle('x', 0)
            a_ = a_ + T.sum(ACT_[:, i], axis=1)
            a_ = T.tanh(a_)
#            if self.dropout:
#                a_ = a_ / self.retain_prob * self._srng.binomial(a_.shape, p=self.retain_prob,
#                                                                 dtype=theano.config.floatX)
            a_ = T.switch(in_mask, a_, ACT[:, i-1])
            a__ = T.batched_tensordot(a_[sub_tree_idx_[0], :],
                                      self.WC[WT[sub_tree_idx_[0], 
                                                 i, sub_tree_idx_[1]]], axes=1)
#            if self.dropout:
#                a__ = a__ / self.retain_prob * self._srng.binomial(a__.shape, p=self.retain_prob,
#                                                                   dtype=theano.config.floatX)
            newACT_ = T.set_subtensor(ACT_[sub_tree_idx_[0], sub_tree_idx_[1], i],
                                      a__)
            newACT = T.set_subtensor(ACT[:, i], a_)
            return newACT, newACT_
Author: nturusin | Project: allenchallenge | Lines: 21 | Source: AAI_lasagne_GRNN_1.py


Example 18: train_batch

    def train_batch(self, batch_size):
        T = self.AE.T
        T = T.tocsr()
        nonzero_indices = T.nonzero()
        #pdb.set_trace()
        n_users = len(np.unique(nonzero_indices[0]))
        indices = np.unique(nonzero_indices[0])
        for epoch in xrange(self.epochs):
            for ind, i in enumerate(xrange(0, n_users, batch_size)):
                # CHECK : SEEMS BUGGY. 
                #------------------------
                #ratings = T[indices[i:(i + batch_size)], :].toarray().astype(np.float32)
                ratings = T[indices[i:(i + batch_size)], :].toarray().astype(np.float32)

                #------------------------
                #print ratings
                #pdb.set_trace()
                loss = self.AE.ae_batch(ratings)
                #loss = self.AE.debug(ratings)
                #print loss
                #pdb.set_trace()
                print("Loss for epoch %d  batch %d is %f"%(epoch, ind, loss))
            print("RMSE after one epoch is %f"%(self.RMSE()))
Author: shashankg7 | Project: trust-inference-autoencoder | Lines: 23 | Source: training.py


Example 19: train

 def train(self):
     T = self.AE.T
     # Converting to csr format for indexing
     T = T.tocsr()
     #pdb.set_trace()
     nonzero_indices = T.nonzero()
     for epoch in xrange(self.epochs):
         print("Running epoch %d"%(epoch))
         for i in np.unique(nonzero_indices[0]):
             # get indices of observed values from the user 'i' 's vector
             indices = T[i, :].nonzero()[1]
             #print indices
             #indices = indices.reshape(indices.shape[0],)
             # Get corresponding ratings
             ratings = T[i, indices].toarray()
             #print ratings
             ratings = ratings.reshape(ratings.shape[1],)
             # Convert inputs to theano datatype
             indices = indices.astype(np.int32)
             ratings = ratings.astype(np.int32)
             #pdb.set_trace()
             loss = self.AE.ae(indices, ratings)
             print("Loss at epoch %d is %f"%(epoch, loss))
Author: shashankg7 | Project: trust-inference-autoencoder | Lines: 23 | Source: training.py


Example 20: normal

    def normal(self, size, avg=0.0, std=1.0, ndim=None, dtype=None,
               nstreams=None, truncate=False, **kwargs):
        """
        Sample a tensor of values from a normal distribution.

        Parameters
        ----------
        size : int_vector_like
            Array dimensions for the output tensor.
        avg : float_like, optional
            The mean value for the truncated normal to sample from (defaults to 0.0).
        std : float_like, optional
            The standard deviation for the truncated normal to sample from (defaults to 1.0).
        truncate : bool, optional
            Truncates the normal distribution at 2 standard deviations if True (defaults to False).
            When this flag is set, the standard deviation of the result will be less than the one specified.
        ndim : int, optional
            The number of dimensions for the output tensor (defaults to None).
            This argument is necessary if the size argument is ambiguous on the number of dimensions.
        dtype : str, optional
            The data-type for the output tensor. If not specified,
            the dtype is inferred from avg and std, but it is at least as precise as floatX.
        kwargs
            Other keyword arguments for random number generation (see uniform).

        Returns
        -------
        samples : TensorVariable
            A Theano tensor of samples randomly drawn from a normal distribution.

        """
        size = _check_size(size)
        avg = undefined_grad(as_tensor_variable(avg))
        std = undefined_grad(as_tensor_variable(std))

        if dtype is None:
            dtype = scal.upcast(config.floatX, avg.dtype, std.dtype)

        avg = tensor.cast(avg, dtype=dtype)
        std = tensor.cast(std, dtype=dtype)

        # generate even number of uniform samples
        # Do manual constant folding to lower optimizer work.
        if isinstance(size, theano.Constant):
            n_odd_samples = size.prod(dtype='int64')
        else:
            n_odd_samples = tensor.prod(size, dtype='int64')
        n_even_samples = n_odd_samples + n_odd_samples % 2
        uniform = self.uniform((n_even_samples, ), low=0., high=1.,
                               ndim=1, dtype=dtype, nstreams=nstreams, **kwargs)

        # box-muller transform
        u1 = uniform[:n_even_samples // 2]
        u2 = uniform[n_even_samples // 2:]
        r = tensor.sqrt(-2.0 * tensor.log(u1))
        theta = np.array(2.0 * np.pi, dtype=dtype) * u2
        cos_theta, sin_theta = tensor.cos(theta), tensor.sin(theta)
        z0 = r * cos_theta
        z1 = r * sin_theta

        if truncate:
            # use valid samples
            to_fix0 = (z0 < -2.) | (z0 > 2.)
            to_fix1 = (z1 < -2.) | (z1 > 2.)
            z0_valid = z0[tensor.nonzero(~to_fix0)]
            z1_valid = z1[tensor.nonzero(~to_fix1)]

            # re-sample invalid samples
            to_fix0 = tensor.nonzero(to_fix0)[0]
            to_fix1 = tensor.nonzero(to_fix1)[0]
            n_fix_samples = to_fix0.size + to_fix1.size
            lower = tensor.constant(1. / np.e**2, dtype=dtype)
            u_fix = self.uniform((n_fix_samples, ), low=lower, high=1.,
                                 ndim=1, dtype=dtype, nstreams=nstreams, **kwargs)
            r_fix = tensor.sqrt(-2. * tensor.log(u_fix))
            z0_fixed = r_fix[:to_fix0.size] * cos_theta[to_fix0]
            z1_fixed = r_fix[to_fix0.size:] * sin_theta[to_fix1]

            # pack everything together to a useful result
            norm_samples = tensor.join(0, z0_valid, z0_fixed, z1_valid, z1_fixed)
        else:
            norm_samples = tensor.join(0, z0, z1)
        if isinstance(n_odd_samples, theano.Variable):
            samples = norm_samples[:n_odd_samples]
        elif n_odd_samples % 2 == 1:
            samples = norm_samples[:-1]
        else:
            samples = norm_samples
        samples = tensor.reshape(samples, newshape=size, ndim=ndim)
        samples *= std
        samples += avg

        return samples
Author: DEVESHTARASIA | Project: Theano | Lines: 93 | Source: rng_mrg.py
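A hedged usage sketch for this sampler, assuming it is called through Theano's MRG_RandomStreams (the class this method belongs to in rng_mrg.py):

    import theano
    from theano.sandbox.rng_mrg import MRG_RandomStreams

    rng = MRG_RandomStreams(seed=1234)
    samples = rng.normal(size=(1000,), avg=0.0, std=1.0, truncate=True)
    f = theano.function([], samples)
    draws = f()
    print(draws.min(), draws.max())  # with truncate=True, all draws lie within 2 standard deviations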



Note: the theano.tensor.nonzero examples in this article were compiled by 纯净天空 from GitHub/MSDocs and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by their respective authors; copyright remains with the original authors, and redistribution or use should follow each project's license. Please do not repost without permission.

