
Python tensor.le Function Code Examples


This article collects and summarizes typical usage examples of the theano.tensor.le function in Python. If you have been wondering what exactly le does, how to call it, or what real-world uses of it look like, the curated examples below should help.



The following presents 20 code examples of the le function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
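Before the collected examples, here is a minimal, self-contained sketch (not taken from any of the projects below; all variable names are illustrative) of what tensor.le computes: an elementwise a <= b comparison that returns a 0/1 tensor of dtype int8.

import numpy as np
import theano
import theano.tensor as T

a = T.fvector('a')
b = T.fvector('b')
le_fn = theano.function([a, b], T.le(a, b))  # elementwise a <= b

print(le_fn(np.array([0.1, 0.5, 0.9], dtype=np.float32),
            np.array([0.5, 0.5, 0.5], dtype=np.float32)))
# -> [1 1 0] (int8): each entry indicates whether a[i] <= b[i]

Because the result is an ordinary 0/1 tensor, it composes freely with arithmetic, T.mean, T.switch, and theano.ifelse, which is the pattern running through all of the examples below.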

Example 1: prepareTraining

    def prepareTraining(self):
        '''
        Prepares the relevant functions
        (details on neural_net_creator's prepareTraining)
        '''
        #loss objective to minimize
        self.prediction = lasagne.layers.get_output(self.network)
        self.prediction=self.prediction[:,0]
        #self.loss = lasagne.objectives.categorical_crossentropy(self.prediction, self.target_var)
        #the loss is now the squared error in the output
        self.loss =  lasagne.objectives.squared_error(self.prediction, self.target_var)
        self.loss = self.loss.mean()

        self.params = lasagne.layers.get_all_params(self.network, trainable=True)
        self.updates = lasagne.updates.nesterov_momentum(
                self.loss, self.params, learning_rate=0.01, momentum=0.9)

        self.test_prediction = lasagne.layers.get_output(self.network, deterministic=True)
        self.test_prediction=self.test_prediction[:,0]
        self.test_loss = lasagne.objectives.squared_error(self.test_prediction, self.target_var)
        self.test_loss = self.test_loss.mean()
        #the accuracy is now the fraction of samples whose prediction is within 0.01 of the target (the threshold can be changed)
        self.test_acc = T.mean(T.le(T.abs_(T.sub(self.test_prediction,self.target_var)),0.01)
                            , dtype=theano.config.floatX)
        self.test_acc2 = T.mean(T.le(T.abs_(T.sub(self.test_prediction,self.target_var)),0.05)
                            , dtype=theano.config.floatX)
        self.test_acc3 = T.mean(T.le(T.abs_(T.sub(self.test_prediction,self.target_var)),0.1)
                            , dtype=theano.config.floatX)

        self.train_fn = theano.function([self.input_var, self.target_var], self.loss, updates=self.updates)

        self.val_fn = theano.function([self.input_var, self.target_var], [self.test_loss,self.test_acc,self.test_acc2,self.test_acc3])

        self.use = theano.function([self.input_var],[self.test_prediction])
Developer: vr367305 | Project: s4r_metal | Lines: 34 | Source: regression_test.py
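The three accuracy metrics above use T.le to count predictions that land within a tolerance of the target. A minimal numeric sketch of the same pattern (standalone; the names pred and target are illustrative, not from the example):

import numpy as np
import theano
import theano.tensor as T

pred = T.fvector('pred')
target = T.fvector('target')
# fraction of predictions within 0.05 of the target, as in test_acc2 above
acc = T.mean(T.le(T.abs_(pred - target), 0.05), dtype=theano.config.floatX)
acc_fn = theano.function([pred, target], acc)

print(acc_fn(np.array([0.50, 0.80, 0.10], dtype=np.float32),
             np.array([0.52, 0.70, 0.11], dtype=np.float32)))
# -> ~0.667: two of the three predictions fall within the 0.05 tolerance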


Example 2: _get_targets

def _get_targets(y, log_y_hat, y_mask, y_hat_mask):
    '''
    Returns the target values according to the CTC cost with respect to y_hat.
    Note that this is part of the gradient with respect to the softmax output
    and not with respect to the input of the original softmax function.
    All computations are done in log scale
    '''
    num_classes = log_y_hat.shape[2] - 1
    blanked_y, blanked_y_mask = _add_blanks(
        y=y,
        blank_symbol=num_classes,
        y_mask=y_mask)

    log_alpha, log_beta = _log_forward_backward(blanked_y,
                                                log_y_hat, blanked_y_mask,
                                                y_hat_mask, num_classes)
    # explicitly not using a mask to prevent inf - inf
    y_prob = _class_batch_to_labeling_batch(blanked_y, log_y_hat,
                                            y_hat_mask=None)
    marginals = log_alpha + log_beta - y_prob
    max_marg = marginals.max(2)
    max_marg = T.switch(T.le(max_marg, -np.inf), 0, max_marg)
    log_Z = T.log(T.exp(marginals - max_marg[:,:, None]).sum(2))
    log_Z = log_Z + max_marg
    log_Z = T.switch(T.le(log_Z, -np.inf), 0, log_Z)
    targets = _labeling_batch_to_class_batch(blanked_y,
                                             T.exp(marginals -
                                                   log_Z[:,:, None]),
                                             num_classes + 1)
    return targets
Developer: trungnt13 | Project: dnntoolkit | Lines: 30 | Source: ctc_cost.py


Example 3: call

 def call(self, X):
     if type(X) is not list or len(X) != 2:
         raise Exception("SquareAttention must be called on a list of two tensors. Got: " + str(X))
         
     frame, position  = X[0], X[1]
     
     # Reshaping the input to exclude the time dimension
     frameShape = K.shape(frame)
     positionShape = K.shape(position)
     (chans, height, width) = frameShape[-3:]
     targetDim = positionShape[-1]
     frame = K.reshape(frame, (-1, chans, height, width))
     position = K.reshape(position, (-1, ) + (targetDim, ))
     
     # Applying the attention
     hw = THT.abs_(position[:, 2] - position[:, 0]) * self.scale / 2.0
     hh = THT.abs_(position[:, 3] - position[:, 1]) * self.scale / 2.0
     position = THT.maximum(THT.set_subtensor(position[:, 0], position[:, 0] - hw), -1.0)
     position = THT.minimum(THT.set_subtensor(position[:, 2], position[:, 2] + hw), 1.0)
     position = THT.maximum(THT.set_subtensor(position[:, 1], position[:, 1] - hh), -1.0)
     position = THT.minimum(THT.set_subtensor(position[:, 3], position[:, 3] + hh), 1.0)
     rX = Data.linspace(-1.0, 1.0, width)
     rY = Data.linspace(-1.0, 1.0, height)
     FX = THT.gt(rX, position[:,0].dimshuffle(0,'x')) * THT.le(rX, position[:,2].dimshuffle(0,'x'))
     FY = THT.gt(rY, position[:,1].dimshuffle(0,'x')) * THT.le(rY, position[:,3].dimshuffle(0,'x'))
     m = FY.dimshuffle(0, 1, 'x') * FX.dimshuffle(0, 'x', 1)
     m = m + self.alpha - THT.gt(m, 0.) * self.alpha
     frame = frame * m.dimshuffle(0, 'x', 1, 2)
     
     # Reshaping the frame to include time dimension
     output = K.reshape(frame, frameShape)
     
     return output
Developer: fhdiaze | Project: DeepTracking | Lines: 33 | Source: SquareAttention.py


Example 4: calc_time_gate

        def calc_time_gate(time_input_n):
            # Broadcast the time across all units
            t_broadcast = time_input_n.dimshuffle([0,'x'])
            # Get the time within the period
            in_cycle_time = T.mod(t_broadcast + shift_broadcast, period_broadcast)
            # Find the phase
            is_up_phase = T.le(in_cycle_time, on_mid_broadcast)
            is_down_phase = T.gt(in_cycle_time, on_mid_broadcast)*T.le(in_cycle_time, on_end_broadcast)
            # Set the mask
            sleep_wake_mask = T.switch(is_up_phase, in_cycle_time/on_mid_broadcast,
                                T.switch(is_down_phase,
                                    (on_end_broadcast-in_cycle_time)/on_mid_broadcast,
                                        off_slope*(in_cycle_time/period_broadcast)))

            return sleep_wake_mask
Developer: HenryWoodOTC | Project: time_lstm | Lines: 15 | Source: plstm.py
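The gate above implements the Phased LSTM openness schedule: a linear ramp up over the first half of the open phase, a ramp down over the second half, and a small leak everywhere else. A NumPy sketch of the same piecewise rule (the constants and the helper name time_gate are illustrative assumptions, not from the source):

import numpy as np

def time_gate(t, period=5.0, shift=0.0, r_on=0.2, off_slope=1e-3):
    # position of each timestamp within its period
    in_cycle = np.mod(t + shift, period)
    on_mid = r_on * period / 2.0   # end of the rising half of the open phase
    on_end = r_on * period         # end of the open phase
    up = in_cycle <= on_mid                            # cf. T.le above
    down = (in_cycle > on_mid) & (in_cycle <= on_end)  # cf. T.gt * T.le above
    return np.where(up, in_cycle / on_mid,
                    np.where(down, (on_end - in_cycle) / on_mid,
                             off_slope * (in_cycle / period)))

print(time_gate(np.array([0.0, 0.25, 0.5, 0.75, 2.5])))
# -> [0.     0.5    1.     0.5    0.0005]: ramp up, peak, ramp down, leak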


Example 5: tied_neighbours

def tied_neighbours(preds, n_sample_preds, n_classes):
    eps = 1e-8
    #preds = T.clip(preds, eps, 1-eps)
    preds_per_trial_row = preds.reshape((-1, n_sample_preds, n_classes))
    earlier_neighbours = preds_per_trial_row[:,:-1]
    later_neighbours = preds_per_trial_row[:,1:]
    # Have to ensure the first values are larger than zero
    # for numerical stability :/
    # Example of problem otherwise:
    """
    a = T.fmatrix()
    b = T.fmatrix()
    soft_out_a =softmax(a)
    soft_out_b =softmax(b)
    
    loss = categorical_crossentropy(soft_out_a[:,1:],soft_out_b[:,:-1])
    neigh_fn = theano.function([a,b], loss)
    
    neigh_fn(np.array([[0,1000,0]], dtype=np.float32), 
        np.array([[0.1,0.9,0.3]], dtype=np.float32))
    -> inf
    """
    
    # renormalize(?)
    
    earlier_neighbours = (T.gt(earlier_neighbours, eps) * earlier_neighbours + 
        T.le(earlier_neighbours, eps) * earlier_neighbours + eps)
    loss = categorical_crossentropy(earlier_neighbours, later_neighbours)
    return loss
Developer: robintibor | Project: braindecode | Lines: 29 | Source: objectives.py


Example 6: each_loss

        def each_loss(outpt, inpt):
            # y is the label sequence after inserting blanks
            blank = 26
            y_nblank = T.neq(inpt, blank)
            n = T.dot(y_nblank, y_nblank)  # true label length
            N = 2 * n + 1  # label length after blank insertion, excluding trailing padding
            labels = inpt[:N]
            labels2 = T.concatenate((labels, [blank, blank]))
            sec_diag = T.neq(labels2[:-2], labels2[2:]) * T.eq(labels2[1:-1], blank)
            recurrence_relation = \
                T.eye(N) + \
                T.eye(N, k=1) + \
                T.eye(N, k=2) * sec_diag.dimshuffle((0, 'x'))

            pred_y = outpt[:, labels]

            fwd_pbblts, _ = theano.scan(
                lambda curr, accum: T.switch(T.eq(curr*T.dot(accum, recurrence_relation), 0.0),
                                             T.dot(accum, recurrence_relation)
                                             , curr*T.dot(accum, recurrence_relation)),
                sequences=[pred_y],
                outputs_info=[T.eye(N)[0]]
            )
            #return fwd_pbblts
            #likelihood = fwd_pbblts[0, 0]
            likelihood = fwd_pbblts[-1, -1] + fwd_pbblts[-1, -2]
            #likelihood = T.switch(T.lt(likelihood, 1e-35), 1e-35, likelihood)
            #loss = -T.log(T.cast(likelihood, "float32"))
            #loss = 10 * (likelihood - 1) * (likelihood - 100)
            loss = (T.le(likelihood, 1.0)*(10*(likelihood-1)*(likelihood-100)))+(T.gt(likelihood, 1.0)*(-T.log(T.cast(likelihood, "float32"))))
            return loss
Developer: nightinwhite | Project: Theano-NN_Starter | Lines: 31 | Source: Layer.py


Example 7: __init__

 def __init__(self, input, sigma=20.0, window_radius=60):
     self.input = input
     self.sigma = theano.shared(value=np.array(sigma, dtype=theano.config.floatX), name='sigma')
     apply_blur = T.gt(self.sigma, 0.0)
     no_blur = T.le(self.sigma, 0.0)
     self.output = ifelse(no_blur, input, gaussian_filter(input.dimshuffle('x', 0, 1), self.sigma, window_radius)[0, :, :])
     self.params = [self.sigma]
Developer: matthias-k | Project: pysaliency | Lines: 7 | Source: theano_utils.py


Example 8: compile

    def compile(self):
        # 1D: n_words, 2D: batch * n_cands
        self.x = T.imatrix()
        self.y = T.fvector()
        self.train_inputs = [self.x, self.y]
        self.pred_inputs = [self.x]

        self.activation = self.args.activation
        self.n_d = self.args.hidden_dim
        self.n_e = self.emb_layers[0].n_d
        self.pad_id = self.emb_layers[0].vocab_map[PAD]
        self.dropout = theano.shared(np.float32(self.args.dropout).astype(theano.config.floatX))

        self._set_layers(args=self.args, n_d=self.n_d, n_e=self.n_e)

        ###########
        # Network #
        ###########
        h_in = self._input_layer(x=self.x)
        h = self._mid_layer(h_prev=h_in, x=self.x, pad_id=self.pad_id)
        y_scores = self._output_layer(h=h)
        self.y_pred = T.le(0.5, y_scores)

        #########################
        # Set an objective func #
        #########################
        self.set_params(layers=self.layers)
        self.loss = self.set_loss(self.y, y_scores)
        self.cost = self.set_cost(args=self.args, params=self.params, loss=self.loss)
Developer: hiroki13 | Project: neural-sentence-matching-system | Lines: 29 | Source: sent_matching_model.py


Example 9: gate_layer

def gate_layer(tparams, X_word, X_char, options, prefix, pretrain_mode, activ='lambda x: x', **kwargs):
    """ 
    compute the forward pass for a gate layer

    Parameters
    ----------
    tparams        : OrderedDict of theano shared variables, {parameter name: value}
    X_word         : theano 3d tensor, word input, dimensions: (num of time steps, batch size, dim of vector)
    X_char         : theano 3d tensor, char input, dimensions: (num of time steps, batch size, dim of vector)
    options        : dictionary, {hyperparameter: value}
    prefix         : string, layer name
    pretrain_mode  : theano shared scalar, 0. = word only, 1. = char only, 2. = word & char
    activ          : string, activation function: 'liner', 'tanh', or 'rectifier'

    Returns
    -------
    X              : theano 3d tensor, final vector, dimensions: (num of time steps, batch size, dim of vector)

    """      
    # compute gating values, Eq.(3)
    G = tensor.nnet.sigmoid(tensor.dot(X_word, tparams[p_name(prefix, 'v')]) + tparams[p_name(prefix, 'b')][0])
    X = ifelse(tensor.le(pretrain_mode, numpy.float32(1.)),  
               ifelse(tensor.eq(pretrain_mode, numpy.float32(0.)), X_word, X_char),
               G[:, :, None] * X_char + (1. - G)[:, :, None] * X_word)   
    return eval(activ)(X)
Developer: nyu-dl | Project: gated_word_char_rlm | Lines: 25 | Source: layers.py
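A minimal sketch of the branch-selection pattern used above: tensor.le and theano.ifelse switching on a shared scalar mode flag. The names and the fixed 50/50 mix (standing in for the learned gate G) are illustrative assumptions:

import numpy
import theano
import theano.tensor as tensor
from theano.ifelse import ifelse

mode = theano.shared(numpy.float32(0.))  # 0. = word only, 1. = char only, 2. = mix
x_word = tensor.fvector('x_word')
x_char = tensor.fvector('x_char')

out = ifelse(tensor.le(mode, numpy.float32(1.)),
             ifelse(tensor.eq(mode, numpy.float32(0.)), x_word, x_char),
             numpy.float32(0.5) * x_word + numpy.float32(0.5) * x_char)
f = theano.function([x_word, x_char], out)

w = numpy.array([1., 1.], dtype=numpy.float32)
c = numpy.array([2., 2.], dtype=numpy.float32)
print(f(w, c))  # mode 0. -> [1. 1.] (word-only branch)
mode.set_value(numpy.float32(2.))
print(f(w, c))  # mode 2. -> [1.5 1.5] (mixing branch)

Because ifelse evaluates lazily, only the selected branch is computed at runtime, which is why the original uses it rather than T.switch for the pretraining modes.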


Example 10: logp_loss3

	def logp_loss3(self, x, y, fake_label, neg_label, pos_ratio=0.5):  # adopt maxout for negatives
		# pos_ratio is the weight on positive examples (0.5 means an equal 1:1 weighting)

		print("adopt positives weight ............. " + str(pos_ratio))
		y = y.dimshuffle((1, 0))
		inx = x.dimshuffle((1, 0))
		fake_mask = T.neq(y, fake_label)
		y = y * fake_mask

		pos_mask = T.and_(fake_mask, T.le(y, neg_label - 1)) * pos_ratio
		neg_mask = T.ge(y, neg_label) * (1 - pos_ratio)


		pos_score, neg_score = self.structure2(inx,False)
		maxneg = T.max(neg_score, axis = -1)

		scores = T.concatenate((pos_score, maxneg.dimshuffle((0,1,'x'))), axis = 2)

		d3shape = scores.shape

		#seq*batch , label
		scores = scores.reshape((d3shape[0]*d3shape[1],  d3shape[2]))
		pro = T.nnet.softmax(scores)

		_logp = T.nnet.categorical_crossentropy(pro, y.flatten())

		_logp = _logp.reshape(fake_mask.shape)

		loss = (T.sum(_logp*pos_mask)+ T.sum(_logp*neg_mask))/ (T.sum(pos_mask)+T.sum(neg_mask))
		pos_loss = T.sum(_logp*pos_mask)
		neg_loss = T.sum(_logp*neg_mask)


		return loss, pos_loss, neg_loss
Developer: mswellhao | Project: active_NER | Lines: 35 | Source: token_model.py


Example 11: logp

    def logp(self, value):
        p = self.p
        k = self.k

        sumto1 = theano.gradient.zero_grad(T.le(abs(T.sum(p) - 1), 1e-5))
        return bound(T.log(p[value]),
                     value >= 0, value <= (k - 1),
                     sumto1)
Developer: lihuanshuai | Project: pymc3 | Lines: 8 | Source: discrete.py


Example 12: objective

def objective(y_true, y_pred, P, Q, alpha=0., beta=0.15, dbeta=0., gamma=0.01, gamma1=-1., poos=0.23, eps=1e-6):
    '''Expects a binary class matrix instead of a vector of scalar classes.
    '''

    beta = np.float32(beta)
    dbeta = np.float32(dbeta)
    gamma = np.float32(gamma)
    poos = np.float32(poos)
    eps = np.float32(eps)

    # scale preds so that the class probas of each sample sum to 1
    y_pred += eps
    y_pred /= y_pred.sum(axis=-1, keepdims=True)

    y_true = T.cast(y_true.flatten(), 'int64')
    y1 = T.and_(T.gt(y_true, 0), T.le(y_true, Q))  # in-set
    y0 = T.or_(T.eq(y_true, 0), T.gt(y_true, Q))  # out-of-set or unlabeled
    y0sum = y0.sum() + eps  # number of oos
    y1sum = y1.sum() + eps  # number of in-set
    # we want to reduce the cross entropy of the labeled data
    # convert all oos/unlabeled examples to label=0
    cost0 = T.nnet.categorical_crossentropy(y_pred, T.switch(y_true <= Q, y_true, 0))
    cost0 = T.dot(y1, cost0) / y1sum  # average cost per labeled example

    if alpha:
        cost1 = T.nnet.categorical_crossentropy(y_pred, y_pred)
        cost1 = T.dot(y0, cost1) / y0sum  # average cost per out-of-set example
        cost0 += alpha*cost1

    # we want to increase the average entropy in each batch
    # average over batch
    if beta:
        y_pred_avg0 = T.dot(y0, y_pred) / y0sum
        y_pred_avg0 = T.clip(y_pred_avg0, eps, np.float32(1) - eps)
        y_pred_avg0 /= y_pred_avg0.sum(axis=-1, keepdims=True)
        cost2 = T.nnet.categorical_crossentropy(y_pred_avg0.reshape((1,-1)), P-dbeta)[0] # [None,:]
        cost2 = T.switch(y0sum > 0.5, cost2, 0.)  # ignore cost2 if no samples
        cost0 += beta*cost2

    # binary classifier score
    if gamma:
        y_pred0 = T.clip(y_pred[:,0], eps, np.float32(1) - eps)
        if gamma1 < 0.:
            cost3 = - T.dot(poos*y0,T.log(y_pred0)) - T.dot(np.float32(1)-poos*y0.T,T.log(np.float32(1)-y_pred0))
            cost3 /= y_pred.shape[0]
            cost0 += gamma*cost3
        elif gamma1 > 0.:
            cost3 = - T.dot(poos*y0,T.log(y_pred0)) - T.dot((np.float32(1)-poos)*y0,T.log(np.float32(1)-y_pred0))
            cost3 /= y0sum
            cost31 =  - T.dot(y1,T.log(np.float32(1)-y_pred0))
            cost31 /= y1sum
            cost0 += gamma*cost3 + gamma1*cost31
        else:  # gamma1 == 0.
            cost3 = - T.dot(poos*y0,T.log(y_pred0)) - T.dot((np.float32(1)-poos)*y0, T.log(np.float32(1)-y_pred0))
            cost3 /= y0sum
            cost0 += gamma*cost3
    return cost0
Developer: fulldecent | Project: LRE | Lines: 57 | Source: ladder.py


Example 13: cost

 def cost(self, Y, Y_hat):
     r = self.r
     w = 0.05  # targets at or below this threshold are rescaled by r
     i = T.le(Y, w)
     j = T.eq(i, 0)
     z = T.join(0, Y[i] / r, Y[j])
     z_hat = T.join(0, Y_hat[i] / r, Y_hat[j])
     return super(linear_mlp_bayesian_cost, self).cost(z, z_hat)
Developer: leinxx | Project: pylearn2_cnn | Lines: 9 | Source: cnn_pl_sar.py


Example 14: clip_gradients

def clip_gradients(gparams, threshold=5.):
    clipped_gparams = []
    for gparam in gparams:
        norm_gparam = T.sqrt(T.sqr(gparam).sum())
        clipped_gparams.append(T.switch(T.le(norm_gparam, threshold),
                                        gparam,
                                        (gparam/norm_gparam)*threshold))

    return clipped_gparams
Developer: briancheung | Project: Peano | Lines: 9 | Source: optimizer.py
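A quick numeric check of the norm-clipping rule above (a sketch assuming the clip_gradients definition is in scope; the vectors are illustrative):

import numpy as np
import theano
import theano.tensor as T

g = T.fvector('g')
clipped, = clip_gradients([g], threshold=5.)
clip_fn = theano.function([g], clipped)

print(clip_fn(np.array([3., 4.], dtype=np.float32)))  # norm 5 <= 5: returned unchanged
print(clip_fn(np.array([6., 8.], dtype=np.float32)))  # norm 10 > 5: rescaled to [3. 4.]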


Example 15: huber_loss

def huber_loss(y_true, y_pred, delta=1., axis=None):
    a = y_true - y_pred
    squared_loss = 0.5*T.sqr(a)
    absolute_loss = (delta*abs(a) - 0.5*T.sqr(delta))

    cost = T.switch(T.le(abs(a), delta),
                    squared_loss,
                    absolute_loss)
    return cost.mean(axis=axis)
Developer: briancheung | Project: Peano | Lines: 9 | Source: cost.py
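Working through the two branches by hand (a sketch assuming the huber_loss definition above is in scope; the inputs are illustrative):

import numpy as np
import theano
import theano.tensor as T

y_true = T.fvector('y_true')
y_pred = T.fvector('y_pred')
loss_fn = theano.function([y_true, y_pred], huber_loss(y_true, y_pred, delta=1.))

print(loss_fn(np.array([0.0, 0.0], dtype=np.float32),
              np.array([0.5, 3.0], dtype=np.float32)))
# |a| = 0.5 <= delta -> quadratic branch: 0.5 * 0.5**2          = 0.125
# |a| = 3.0 >  delta -> linear branch:    1.0 * 3.0 - 0.5 * 1.0 = 2.5
# mean over the two elements -> 1.3125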


Example 16: theano_symbolic_dtw

def theano_symbolic_dtw(x1, x2, x1_lengths, x2_lengths, distance_function=cosine, normalize=True, debug_level=None,
                        eps=None):
    """
    A symbolic implementation of DTW that supports batches of sequence pairs.

    Returns a scalar if ndim == 2 and a vector of size x1.shape[1] if ndim == 3

    This is slow! About 90 times slower than the Cython implementation using the parameters below.

    :param x1: A tensor containing the first side of the sequence pairs to be aligned.
    :param x2: A tensor containing the second side of the sequence pairs to be aligned.
    :param x1_lengths: An integer vector identifying the lengths of the sequences in x1
    :param x2_lengths: An integer vector identifying the lengths of the sequences in x2
    :param distance_function: The symbolic distance function to use (e.g. a reference to a function in
                              distance).
    :param normalize: Whether the DTW distances should be sequence length normalized.
    :param debug_level: The debug level to use (see above for explanation).
    :param eps: The minimum value to use inside the distance function. Set to the machine epsilon if None.
    :return: The DTW distances for every sequence pair in the batch.
    """

    if eps is None:
        eps = numpy.dtype(theano.config.floatX).type(numpy.finfo(float).eps)

    assert 0 <= x1_lengths.ndim == x2_lengths.ndim <= 1
    assert isinstance(normalize, bool)

    ndim = x1.ndim
    assert 2 <= ndim == x2.ndim <= 3

    # Ensure x2 is the shorter input to minimize the number of scan iterations
    x1_shorter_than_x2 = tt.le(x1.shape[0], x2.shape[0])
    x1, x2 = _swap(x1_shorter_than_x2, x1, x2, 'x1', 'x2', debug_level)
    x1_lengths, x2_lengths = _swap(x1_shorter_than_x2, x1_lengths, x2_lengths, 'x1_lengths', 'x2_lengths', debug_level)

    # Compute distances between x1 sequences and paired x2 sequences
    d = distance_function(x1, x2, eps)

    # Iterate over the temporal slices of x2. See dtw_outer_step for an explanation of the other inputs to this scan
    # operation
    x1_indexes = tt.arange(x1.shape[0], dtype=DTYPE_INT64)
    results, _ = theano.scan(_create_dtw_outer_step(distance_function, debug_level), sequences=[x1_indexes, d],
                             outputs_info=[
                                 tt.zeros_like(x2[:, :, 0] if x2.ndim == 3 else x2[:, 0], dtype=theano.config.floatX)],
                             non_sequences=[x1_lengths, x2_lengths])
    result = results[x1_lengths - 1, x2_lengths - 1, tt.arange(x1.shape[1])] if x2.ndim == 3 else results[
        x1_lengths - 1, x2_lengths - 1]
    result = _debug(result, 'theano_symbolic_dtw.result', debug_level)
    assert result.ndim == x1_lengths.ndim

    # Length normalize the distances if requested to do so
    if normalize:
        result = _debug(result / tt.cast(x1_lengths + x2_lengths, dtype=utility.get_standard_dtype()),
                        'theano_symbolic_dtw.norm_result', debug_level)

    return result
Developer: OlafLee | Project: TheanoBatchDTW | Lines: 56 | Source: dtw.py


Example 17: logp

    def logp(self, value):
        p = self.p
        k = self.k

        sumto1 = theano.gradient.zero_grad(tt.le(abs(tt.sum(p, axis=-1) - 1), 1e-5))
        if p.ndim > 1:
            a = tt.log(p[tt.arange(p.shape[0]), value])
        else:
            a = tt.log(p[value])
        return bound(a, value >= 0, value <= (k - 1), sumto1)
Developer: hvasbath | Project: pymc3 | Lines: 10 | Source: discrete.py


Example 18: logp

    def logp(self, x):
        n = self.n
        p = self.p

        X = x[self.tri_index]
        X = t.fill_diagonal(X, 1)

        result = self._normalizing_constant(n, p)
        result += (n - 1.0) * log(det(X))
        return bound(result, n > 0, all(le(X, 1)), all(ge(X, -1)))
Developer: paintingpeter | Project: pymc3 | Lines: 10 | Source: multivariate.py


Example 19: output_index

 def output_index(self):
   from theano.ifelse import ifelse
   index = self.index
   if self.sources:
     # In some cases, e.g. forwarding, the target index (for "classes") might have shape[0]==0.
     # Or shape[0]==1 with index[0]==0. See Dataset.shapes_for_batches().
     # Use source index in that case.
     have_zero = T.le(index.shape[0], 1) * T.eq(T.sum(index[0]), 0)
     index = ifelse(have_zero, T.cast(self.sources[0].index,'int8'), T.cast(index,'int8'))
   return index
Developer: rwth-i6 | Project: returnn | Lines: 10 | Source: NetworkBaseLayer.py


Example 20: innerL_

def innerL_(sS, i):
    Ei = calcEk_(sS, i)
    
    # use "+" instead of "or" and "*" instead of "and"
    checkUselessAlpha1 = T.ge(sS.labels[i] * Ei, -sS.tol) + T.ge(sS.alphas[i], sS.C)
    checkUselessAlpha2 = T.le(sS.labels[i]*Ei, sS.tol) + T.lt(sS.alphas[i], 0)
    isUselessAlpha = toTheanoBool(checkUselessAlpha1 * checkUselessAlpha2)
    
    updateL = innerL_alphaInRange_(sS, i, Ei)
    earlyret = sS.retlist(0)
    return ifelse(isUselessAlpha, earlyret, updateL)
Developer: martinmeinke | Project: ipml | Lines: 11 | Source: theanoSMO.py



Note: The theano.tensor.le examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and copyright in each snippet remains with its original author. For redistribution and use, refer to the corresponding project's license; do not reproduce without permission.

