Python tensor.ones_like Function Code Examples


This article collects typical usage examples of the Python function theano.tensor.ones_like. If you are wondering what ones_like does, how to call it, or what real-world usage looks like, the curated code samples below should help.



The following presents 20 code examples of the ones_like function, ordered by popularity.
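
Before diving into the examples, here is a minimal sketch of what ones_like itself does, assuming a working Theano installation (variable names are illustrative):

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')             # symbolic input
y = T.ones_like(x)            # ones with the same shape and dtype as x
f = theano.function([x], y)

print(f(np.zeros((2, 3), dtype=theano.config.floatX)))
# [[ 1.  1.  1.]
#  [ 1.  1.  1.]]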

Example 1: castray

def castray(ro, rd, shape_params, nprims, width, height):
    tmin = 1.0
    tmax = 20.0
    precis = 0.002
    m = -1.0
    # Each marching step produces a sequence of distances d1, d2, ..., dn;
    # accumulating them gives d1, d1+d2, d1+d2+d3, ...
    # The output we want is, for each ray, the total distance to the surface,
    # e.g. a sequence like 0, 20, 25, 27, 28, 28, 28, ... that plateaus on a hit.

    max_num_steps = 25

    # FIXME: `ro + rd * 0` is only a trick to get the right shape; a reshape would be cleaner
    distcolors = mapedit(ro + rd * 0, shape_params, nprims, width, height)
    dists = distcolors
    steps = T.switch(dists < precis, T.zeros_like(dists), T.ones_like(dists))
    accum_dists = T.reshape(dists, (width, height, 1))

    for i in range(max_num_steps - 1):
        distcolors = mapedit(ro + rd * accum_dists, shape_params, nprims, width, height)
        dists = distcolors
        steps = steps + T.switch(dists < precis, T.zeros_like(dists), T.ones_like(dists))
        accum_dists = accum_dists + T.reshape(dists, (width, height, 1))

    last_depth = T.reshape(accum_dists, (width, height))
    depthmap = T.switch(last_depth < tmax, last_depth / tmax, T.zeros_like(last_depth))
    color = 1.0 - steps / float(max_num_steps)  # shade by steps taken (computed but not returned)
    # Return the distance marched along each ray, normalized by tmax
    return depthmap
Author: zenna, Project: Arrows.jl, Lines: 31, Source: iq.py
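
The step-counting idiom in this example, accumulating a 0/1 switch per marching iteration, can be exercised in isolation. A minimal sketch of that single step (names are illustrative, not from the Arrows.jl project):

import numpy as np
import theano
import theano.tensor as T

precis = 0.002
dists = T.matrix('dists')
# 1.0 where the ray is still marching (distance >= precis), 0.0 where it has converged
step_inc = T.switch(dists < precis, T.zeros_like(dists), T.ones_like(dists))
f = theano.function([dists], step_inc)

print(f(np.array([[0.001, 0.5]], dtype=theano.config.floatX)))
# [[ 0.  1.]]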


Example 2: calc_CER

    def calc_CER(self, resultseq, targetseq, resultseq_mask=None, targetseq_mask=None):
        """
        Calculate the character error rate (CER) given ground truth 'targetseq' and CTC decoding output 'resultseq'
        :param resultseq (T1,  B)
        :param resultseq_mask (T1, B)
        :param targetseq (T2,  B)
        :param targetseq_mask (T2, B)
        :return: CER scalar
        """
        if resultseq_mask is None:
            resultseq_mask = tensor.ones_like(resultseq)
        if targetseq_mask is None:
            targetseq_mask = tensor.ones_like(targetseq)

        def step(result_seq, target_seq, result_seq_mask, target_seq_mask, TE, TG):
            L1 = tensor.cast(result_seq_mask.sum(), 'int32')
            L2 = tensor.cast(target_seq_mask.sum(), 'int32')
            d = self._editdist(result_seq[0:L1], target_seq[0:L2])
            TE += d
            TG += target_seq_mask.sum()
            return TE, TG

        outputs, updates = theano.scan(fn=step,
                                       sequences=[resultseq.T, targetseq.T, resultseq_mask.T, targetseq_mask.T],
                                       outputs_info=[tensor.zeros(1), tensor.zeros(1)],
                                       name='calc_CER')
        TE, TG = outputs[0][-1], outputs[1][-1]
        CER = TE/TG
        return CER, TE, TG
Author: DingKe, Project: Precise-CTC, Lines: 29, Source: ctc_theano.py


Example 3: forward_prop_step

        def forward_prop_step(x_t, s_t1_prev, s_t2_prev):
            ''' Inner function encapsulating a propagation step
            This is how we calculated the hidden state in a simple RNN. No longer!
            s_t = T.tanh(U[:,x_t] + W.dot(s_t1_prev))
            '''
            # Word embedding layer
            x_e = E[:,x_t]
            
            # GRU Layer 1
            z_t1 = T.nnet.hard_sigmoid(U[0].dot(x_e) + W[0].dot(s_t1_prev) + b[0])
            r_t1 = T.nnet.hard_sigmoid(U[1].dot(x_e) + W[1].dot(s_t1_prev) + b[1])
            c_t1 = T.tanh(U[2].dot(x_e) + W[2].dot(s_t1_prev * r_t1) + b[2])
            s_t1 = (T.ones_like(z_t1) - z_t1) * c_t1 + z_t1 * s_t1_prev
            
            # GRU Layer 2
            z_t2 = T.nnet.hard_sigmoid(U[3].dot(s_t1) + W[3].dot(s_t2_prev) + b[3])
            r_t2 = T.nnet.hard_sigmoid(U[4].dot(s_t1) + W[4].dot(s_t2_prev) + b[4])
            c_t2 = T.tanh(U[5].dot(s_t1) + W[5].dot(s_t2_prev * r_t2) + b[5])
            s_t2 = (T.ones_like(z_t2) - z_t2) * c_t2 + z_t2 * s_t2_prev
            
            # Final output calculation
            # Theano's softmax returns a matrix with one row, we only need the row
            o_t = T.nnet.softmax(V.dot(s_t2) + c)[0]

            return [o_t, s_t1, s_t2]
Author: BaluJr, Project: ProgramAnalysisProject, Lines: 25, Source: gru_theano.py
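
For reference, each GRU layer above implements the standard update equations (∘ denotes element-wise multiplication; subscripts name the gate, not the layer):

    z_t = hard_sigmoid(U_z x_t + W_z s_{t-1} + b_z)    (update gate)
    r_t = hard_sigmoid(U_r x_t + W_r s_{t-1} + b_r)    (reset gate)
    c_t = tanh(U_c x_t + W_c (s_{t-1} ∘ r_t) + b_c)    (candidate state)
    s_t = (1 - z_t) ∘ c_t + z_t ∘ s_{t-1}              (new hidden state)

In the code, T.ones_like(z_t1) - z_t1 realizes the (1 - z_t) term with the matching shape and dtype.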


Example 4: f

 def f(X):
     """
     Apply hard local winner-take-all on every rows of a theano matrix.
     Parameters
     ----------
     p: theano matrix
         Matrix on whose rows LWTA will be applied.
     block_size: int
         Number of units in each block.
     """
     p = X
     batch_size = p.shape[0]
     num_filters = p.shape[1]
     num_blocks = num_filters // block_size
     w = p.reshape((batch_size, num_blocks, block_size))
     block_max = w.max(axis=2).dimshuffle(0, 1, 'x') * T.ones_like(w)
     max_mask = T.cast(w >= block_max, 'float32')
     indices = np.array(range(1, block_size + 1))
     max_mask2 = max_mask * indices
     block_max2 = max_mask2.max(axis=2).dimshuffle(
         0, 1, 'x') * T.ones_like(w)
     max_mask3 = T.cast(max_mask2 >= block_max2, 'float32')
     w2 = w * max_mask3
     w3 = w2.reshape((p.shape[0], p.shape[1]))
     return w3
Author: mehdidc, Project: lasagnekit, Lines: 25, Source: easy.py
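
A quick way to sanity-check the winner-take-all logic is a NumPy restatement; the sketch below assumes the same right-most tie-breaking that the indices trick above implements (the function name is illustrative):

import numpy as np

def lwta_rows(p, block_size):
    # NumPy restatement of the Theano logic above, for sanity checks only
    batch, nfilt = p.shape
    w = p.reshape(batch, nfilt // block_size, block_size)
    is_max = w == w.max(axis=2, keepdims=True)
    # replicate the indices trick: on ties, keep only the right-most maximum
    ranked = is_max * np.arange(1, block_size + 1)
    keep = ranked == ranked.max(axis=2, keepdims=True)
    return (w * keep).reshape(batch, nfilt)

print(lwta_rows(np.array([[1., 3., 2., 2.]]), 2))
# [[0. 3. 0. 2.]]  -- one winner per block; ties keep the right-most unit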


Example 5: step_fun

 def step_fun(self):
     if self._step_fun is None:
         inputs = T.matrix('inputs')
         states_tm1 = [T.matrix('state_%d_%d_tm1' % (layer, state))
                       for layer in range(self.n_layers)
                       for state in range(self.gate0.n_states)]
         if self.gates[-1].use_attention:
             # not supported: the lines below the raise are unreachable and
             # kept only as a sketch of the intended attention interface
             raise NotImplementedError('Stacked RNN with attention')
             attended = T.tensor3('attended')
             attended_dot_u = T.tensor3('attended_dot_u')
             attention_mask = T.matrix('attention_mask')
             self._step_fun = function(
                     [inputs] + states_tm1 + [
                         attended, attended_dot_u, attention_mask],
                     self.step(*([inputs, T.ones(inputs.shape[:-1])] +
                                 states_tm1 + [T.ones_like(states_tm1[0]),
                                 attended, attended_dot_u,
                                 attention_mask])),
                     name='%s_step_fun'%self.name)
         else:
             self._step_fun = function(
                     [inputs] + states_tm1,
                     self.step(*([inputs, T.ones(inputs.shape[:-1])] +
                               states_tm1 + [T.ones_like(states_tm1[0])])),
                     name='%s_step_fun'%self.name)
     return self._step_fun
Author: robertostling, Project: bnas, Lines: 26, Source: model.py


Example 6: get_output

	def get_output(self, train=False):

		X = self.get_input(train)

		full = T.ones_like(X)
		masks = [full]

		for i in xrange(len(self.input_shapes)):
			mask = T.ones_like(X)
			idx = 0
			for j in xrange(len(self.input_shapes)):
				if i == j:
					try:
						ishape = self.input_shapes[0]
					except Exception:
						ishape = [1]
					if len(ishape)  == 3:
						mask = T.set_subtensor(mask[:,:,idx : idx+ self.input_shapes[j]], 0)
					elif len(ishape) == 2:
						mask = T.set_subtensor(mask[:,idx : idx+ self.input_shapes[j]], 0)
					elif len(ishape) == 1:
						mask = T.set_subtensor(mask[idx : idx+ self.input_shapes[j]], 0)
					else:
						raise NotImplementedError()
				idx =  idx + self.input_shapes[j]
			masks += [mask]
		masked = T.stack(masks)

		if train:
			index  = self.trng.random_integers(size=(1,),low = 0, high = len(masks)-1)[0]
		else:
			index = 0
		masked_output = X * masked[index]
		return masked_output
Author: hongyuanzhu, Project: keras, Lines: 35, Source: dropmodality.py


Example 7: _cdf

    def _cdf(self, para, X):
        '''
        Compute the CDF at each ordinal threshold: thresholds are the cumulative
        sum of b and d^2, padded with -1e20/+1e20 sentinels for the open-ended
        outer categories.
        '''
        z = self._z(para, X)
        b = para['b'].value
        d = para['d'].value
        s = para['s'].value

        b = b.dimshuffle(0, 'x')
        NU = TT.extra_ops.cumsum(
            TT.concatenate((b, TT.sqr(d)), axis=1),
            axis=1)

        NU = TT.concatenate(
            (-1e20 * TT.ones_like(b), NU, 1e20 * TT.ones_like(b)),
            axis=1)

        NU = NU.dimshuffle('x', 0, 1)
        Z = z.dimshuffle(1, 0, 'x')
        Z = TT.extra_ops.repeat(Z, NU.shape[2], 2)
        S = s.dimshuffle('x', 0, 'x')

        cdf = self._margin(NU, TT.sqr(S), Z)

        return cdf 
Author: JeanKossaifi, Project: copula_ordinal_regression, Lines: 25, Source: BASE.py


Example 8: _build_marginal_likelihood_logp

 def _build_marginal_likelihood_logp(self, y, X, Xu, sigma):
     sigma2 = tt.square(sigma)
     Kuu = self.cov_func(Xu)
     Kuf = self.cov_func(Xu, X)
     Luu = cholesky(stabilize(Kuu))
     A = solve_lower(Luu, Kuf)
     Qffd = tt.sum(A * A, 0)
     if self.approx == "FITC":
         Kffd = self.cov_func(X, diag=True)
         Lamd = tt.clip(Kffd - Qffd, 0.0, np.inf) + sigma2
         trace = 0.0
     elif self.approx == "VFE":
         Lamd = tt.ones_like(Qffd) * sigma2
         trace = ((1.0 / (2.0 * sigma2)) *
                  (tt.sum(self.cov_func(X, diag=True)) -
                   tt.sum(tt.sum(A * A, 0))))
     else:  # DTC
         Lamd = tt.ones_like(Qffd) * sigma2
         trace = 0.0
     A_l = A / Lamd
     L_B = cholesky(tt.eye(Xu.shape[0]) + tt.dot(A_l, tt.transpose(A)))
     r = y - self.mean_func(X)
     r_l = r / Lamd
     c = solve_lower(L_B, tt.dot(A, r_l))
     constant = 0.5 * X.shape[0] * tt.log(2.0 * np.pi)
     logdet = 0.5 * tt.sum(tt.log(Lamd)) + tt.sum(tt.log(tt.diag(L_B)))
     quadratic = 0.5 * (tt.dot(r, r_l) - tt.dot(c, c))
     return -1.0 * (constant + logdet + quadratic + trace)
Author: bballamudi, Project: pymc3, Lines: 28, Source: gp.py


Example 9: apply

 def apply(self, input_vars):
     c = input_vars[0]
     if c.ndim == 1:
         ones = T.ones_like(c)
     else:
         ones = T.ones_like(c[:, 0])
     return -np.log(self.vec.num_types) * ones
Author: futurulus, Project: colors-in-context, Lines: 7, Source: listener.py
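
Here ones_like only broadcasts a scalar constant to the batch shape. A minimal sketch of the same idiom (num_types stands in for self.vec.num_types):

import numpy as np
import theano
import theano.tensor as T

num_types = 10                                    # placeholder vocabulary size
c = T.matrix('c')
logp = -np.log(num_types) * T.ones_like(c[:, 0])  # one constant score per batch row
f = theano.function([c], logp)

print(f(np.zeros((3, 4), dtype=theano.config.floatX)))
# [-2.30258509 -2.30258509 -2.30258509]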


Example 10: test_gpujoin_gpualloc

def test_gpujoin_gpualloc():
    a = T.fmatrix('a')
    a_val = numpy.asarray(numpy.random.rand(4, 5), dtype='float32')
    b = T.fmatrix('b')
    b_val = numpy.asarray(numpy.random.rand(3, 5), dtype='float32')

    f = theano.function([a, b], T.join(0, T.zeros_like(a), T.ones_like(b)) + 4,
                        mode=mode_without_gpu)
    f_gpu = theano.function([a, b], T.join(0, T.zeros_like(a), T.ones_like(b)),
                            mode=mode_with_gpu)
    f_gpu2 = theano.function([a, b], T.join(0, T.zeros_like(a),
                                           T.ones_like(b)) + 4,
                             mode=mode_with_gpu)

    assert sum([node.op == T.alloc for node in f.maker.env.toposort()]) == 2
    assert sum([node.op == T.join for node in f.maker.env.toposort()]) == 1
    assert sum([node.op == B.gpu_alloc
                for node in f_gpu.maker.env.toposort()]) == 2
    assert sum([node.op == B.gpu_join
                for node in f_gpu.maker.env.toposort()]) == 1
    assert sum([node.op == B.gpu_alloc
                for node in f_gpu2.maker.env.toposort()]) == 2
    assert sum([node.op == B.gpu_join
                for node in f_gpu2.maker.env.toposort()]) == 1
    assert numpy.allclose(f(a_val, b_val), f_gpu2(a_val, b_val))
Author: gexarcha, Project: Theano, Lines: 25, Source: test_basic_ops.py


Example 11: build_model

 def build_model(self):
   print '\n... building the model with unroll=%d, backroll=%d' \
     % (self.source.unroll, self.source.backroll)
   x = T.imatrix('x')
   y = T.imatrix('y')
   reset = T.scalar('reset')
   hiddens = [h['init'] for h in self.hiddens.values()]
   outputs_info = [None] * 3 + hiddens
   [losses, probs, errors, hids], updates = \
     theano.scan(self.step, sequences=[x, y], outputs_info=outputs_info)
   loss = losses.sum()
   error = errors.sum() / T.cast((T.neq(y, 255).sum()), floatX)
   hidden_updates_train = []
   hidden_updates_test = []
   for h in self.hiddens.values():
     h_train = ifelse(T.eq(reset, 0), \
       hids[-1-self.source.backroll, :], T.ones_like(h['init']))
     h_test = ifelse(T.eq(reset, 0), \
       hids[-1, :], T.ones_like(h['init']))
     hidden_updates_train.append((h['init'], h_train))
     hidden_updates_test.append((h['init'], h_test))
   updates = self.source.get_updates(loss, self.sgd_params)
   updates += hidden_updates_train
   rets = [loss, probs[-1, :], error]
   mode = theano.Mode(linker='cvm')
   train_model = theano.function([x, y, reset, self.lr], rets, \
     updates=updates, mode=mode)
   test_model = theano.function([x, y, reset], rets, \
     updates=hidden_updates_test, mode=mode)
   return train_model, test_model
Author: ivanhe, Project: rnn, Lines: 30, Source: model.py


Example 12: forward_prop_step

        def forward_prop_step(x_t, dropmask_t, s_1_prev, s_2_prev):

            # Word embedding layer
            x_e = E.dot(x_t.T)
            x_e = x_e.astype(theano.config.floatX)


            # DropConnect: keep every weight by default (mask of ones)
            drop_mask = T.ones_like(U_update[0].astype(theano.config.floatX), dtype=theano.config.floatX)
            if regularization_type == RegularizationType.DROP_CONNECT:
                drop_mask = dropmask_t

            # GRU Layer 1
            update_gate_1 = T.nnet.hard_sigmoid((drop_mask * U_update[0]).dot(x_e) + W_update[0].dot(s_1_prev) + b_update[0])
            reset_gate_1 = T.nnet.hard_sigmoid((drop_mask * U_reset[0]).dot(x_e) + W_reset[0].dot(s_1_prev) + b_reset[0])
            c_1 = T.tanh((drop_mask * U_candidate[0]).dot(x_e) + W_candidate[0].dot(s_1_prev * reset_gate_1) + b_candidate[0])
            s_1 = (T.ones_like(update_gate_1) - update_gate_1) * c_1 + update_gate_1 * s_1_prev

            # GRU Layer 2
            update_gate_2 = T.nnet.hard_sigmoid((drop_mask * U_update[0]).dot(s_1) + W_update[0].dot(s_2_prev) + b_update[0])
            reset_gate_2 = T.nnet.hard_sigmoid((drop_mask * U_reset[0]).dot(s_1) + W_reset[0].dot(s_2_prev) + b_reset[0])
            c_2 = T.tanh((drop_mask * U_candidate[0]).dot(s_1) + W_candidate[0].dot(s_2_prev * reset_gate_2) + b_candidate[0])
            s_2 = (T.ones_like(update_gate_2) - update_gate_2) * c_2 + update_gate_2 * s_2_prev

            # Final output calculation
            # Theano's softmax returns a matrix with one row, we only need the row
            o_t = T.nnet.softmax(V.dot(s_2) + output_bias)[0]

            return [o_t, s_1, s_2]
Author: samiraabnar, Project: GRU, Lines: 31, Source: RecursiveGRU2LwEmSentenceBased.py
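
The drop_mask multiplied into the U matrices implements DropConnect (masking individual weights rather than whole units). A hedged sketch of sampling such a mask with Theano's MRG random streams; the shape and retention rate are illustrative, not taken from the project:

import theano
from theano.sandbox.rng_mrg import MRG_RandomStreams

srng = MRG_RandomStreams(seed=1234)
keep_prob = 0.8                  # illustrative retention rate
U_shape = (128, 100)             # illustrative weight-matrix shape
# binary 0/1 mask over individual weights, resampled per call
drop_mask = srng.binomial(size=U_shape, p=keep_prob,
                          dtype=theano.config.floatX)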


Example 13: __init__

    def __init__(self, optimizer_params, model_obj=None, X=None, Y=None, Y_aux=[], top_loss=None, params=None):
        print "Compiling RPROP..."
        super(compileRPROP, self).__init__(model_obj, X, Y, Y_aux, top_loss, params)

        self.LRs = []
        RPROP_updates = []

        # Initialise shared variables for the Training algos
        for i, para in enumerate(self.params):
            if para in self.params[:i]:
                print "Detected RNN or shared param @index =", i
            else:
                self.LRs.append(
                    theano.shared(
                        np.float32(optimizer_params["initial_update_size"])
                        * np.ones(para.get_value().shape, dtype="float32"),
                        name=para.name + str("_RPROP"),
                        borrow=0,
                    )
                )

        print "RPROP: missing backtracking handling "  ###TODO ???
        for param_i, grad_i, last_grad_i, pLR_i in zip(self.params, self.gradients, self.last_grads, self.LRs):
            # The 4 commented lines below are not valid Theano; illustration only:
            # if   ((last_grad_i*grad_i) < -1e-9): # sign change & significant magnitude of last two gradients
            #   pLR_i_new = pLR_i * (1 - np.float32(RPROP_penalty)) # decrease this LR
            # elif ((last_grad_i*grad_i) > 1e-11): # no sign change & last two gradients sufficiently big
            #   pLR_i_new = pLR_i * (1 + np.float32(RPROP_gain))    # increase this LR

            # capping RPROP-LR inside [1e-7,2e-3]
            RPROP_updates.append(
                (
                    pLR_i,
                    T.minimum(
                        T.maximum(
                            pLR_i
                            * (
                                1
                                - np.float32(optimizer_params["penalty"]) * ((last_grad_i * grad_i) < -1e-9)
                                + np.float32(optimizer_params["gain"]) * ((last_grad_i * grad_i) > 1e-11)
                            ),
                            1e-7 * T.ones_like(pLR_i),
                        ),
                        2e-3 * T.ones_like(pLR_i),
                    ),
                )
            )
            RPROP_updates.append(
                (param_i, param_i - pLR_i * grad_i / (T.abs_(grad_i) + 1e-6) - (self.weightdecay * param_i))
            )
            RPROP_updates.append((last_grad_i, grad_i))

        self.step = theano.function(
            [self.X, self.Y] + self.Y_aux,
            [self.top_loss, self.loss_instance],
            updates=RPROP_updates,
            on_unused_input="warn",
        )
        print " Compiling done  - in %.3f s!" % (time.time() - self.t_init)
Author: ELEKTRONN, Project: ELEKTRONN, Lines: 59, Source: optimizer.py


Example 14: __init__

 def __init__(self, gtype, alfa=0.02, ifreset=False, countmax=100):
     self._alfa = alfa
     self._gradsum = T.ones_like(gtype)
     self._gradsum_init = T.ones_like(gtype)
     # parameters for resetting _gradsum
     self._ifreset = ifreset
     self._counter = 0
     self._countmax = countmax
Author: sugaton, Project: theano_feedforwardNN, Lines: 8, Source: wordseg.py


Example 15: sample

    def sample(self, alpha, beta):
        z_1 = super(BetaSample,
                    self).sample(alpha, T.ones_like(alpha))

        z_2 = super(BetaSample,
                    self).sample(beta, T.ones_like(beta))

        return z_1 / (z_1 + z_2)
Author: Seb-Leb, Project: Tars, Lines: 8, Source: distribution_samples.py


Example 16: adaptive_mask

def adaptive_mask(mask, black=0., ignore=0.5, white=1.):
    bw = ignore * T.ones_like(mask, dtype=floatX)
    t_black = black*T.ones_like(bw, dtype=floatX)
    t_white = white*T.ones_like(bw, dtype=floatX)
    white_idx = (mask > MASK["IGNORE"]).nonzero()
    black_idx = (mask < MASK["BACKGROUND_RING"]).nonzero()
    bw = T.set_subtensor(bw[white_idx], t_white[white_idx])
    bw = T.set_subtensor(bw[black_idx], t_black[black_idx])
    return bw
Author: berleon, Project: deepdecoder, Lines: 9, Source: utils.py
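
The black/white thresholds come from a project-level MASK dictionary. A usage sketch with placeholder threshold values, just to exercise the function (the real constants live in the deepdecoder project):

import numpy as np
import theano
import theano.tensor as T

floatX = theano.config.floatX
MASK = {"IGNORE": 0, "BACKGROUND_RING": -1}  # placeholder values, not the project's

m = T.matrix('m')
f = theano.function([m], adaptive_mask(m))   # adaptive_mask as defined above
print(f(np.array([[-2., 0., 2.]], dtype=floatX)))
# [[ 0.   0.5  1. ]]  below BACKGROUND_RING -> black, above IGNORE -> white, else ignore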


Example 17: get_model

    def get_model(self,X, Y, x_test):
        '''
        Gaussian Process Regression model.
        Reference: C.E. Rasmussen, "Gaussian Processes for Machine Learning", MIT Press 2006

        Args:
            X: tensor matrix, training data
            Y: tensor matrix, training target
            x_test: tensor matrix, testing data
        
        Returns:
            K: prior cov matrix
            Ks: prior joint cov matrix
            Kss: prior cov matrix for testing data
            Posterior Distribution:
                alpha: alpha = inv(K)*(y-m)
                sW: vector containing diagonal of sqrt(W)
                L: L = chol(sW*K*sW+eye(n))
            y_test_mu: predictive mean
            y_test_var: predictive variance
            fs2: predictive latent variance
        Note: the cov matrix inverse is computed through Cholesky factorization
        https://makarandtapaswi.wordpress.com/2011/07/08/cholesky-decomposition-for-matrix-inversion/
        '''
        # Compute GP prior distribution: mean and covariance matrices (eq 2.13, 2.14)
        K = self.covFunc(X,X,'K') # prior cov
        #m = T.mean(Y)*T.ones_like(Y) # prior mean
        m = self.mean*T.ones_like(Y) # prior mean

        # Compute GP joint prior distribution between training and test (eq 2.18)
        Ks = self.covFunc(X,x_test,'Ks')
        # Note: Kss is the self-covariance matrix of the test points.
        Kss = self.covFunc(x_test,x_test,'Kss',mode='self_test')

        # Compute posterior distribution with noise: L,alpha,sW,and log_likelihood.
        sn2 = T.exp(2*self.sigma_n) # noise variance of likGauss
        L = sT.cholesky(K/sn2 + T.identity_like(K))
        sl = sn2
        alpha = T.dot(sT.matrix_inverse(L.T), 
                      T.dot(sT.matrix_inverse(L), (Y-m)) ) / sl
        sW = T.ones_like(T.sum(K,axis=1)).reshape((K.shape[0],1)) / T.sqrt(sl)
        log_likelihood = T.sum(-0.5 * (T.dot((Y-m).T, alpha)) - T.sum(T.log(T.diag(L))) - X.shape[0] / 2 * T.log(2.*np.pi*sl))
        
        
        # Compute predictive distribution using the computed posterior distribution.
        fmu = m + T.dot(Ks.T, alpha) # Prediction Mu fs|f, eq 2.25 
        V = T.dot(sT.matrix_inverse(L),T.extra_ops.repeat(sW,x_test.shape[0],axis=1)*Ks)
        fs2 = Kss - (T.sum(V*V,axis=0)).reshape((1,V.shape[1])).T # Predictive sigma, eq 2.26
        fs2 = T.maximum(fs2,0) # remove negative variance noise
        #fs2 = T.sum(fs2,axis=1) # in case x has multiple dimensions

        y_test_mu = fmu
        y_test_var = fs2 + sn2

        return K, Ks, Kss, y_test_mu, y_test_var, log_likelihood, L, alpha,V, fs2,sW
Author: shenxudeu, Project: gp_theano, Lines: 55, Source: gptheano_model.py


Example 18: _alignData

 def _alignData(self, w_t, sv_tm1):
     # pad the mask with a dummy element
     mask = T.concatenate([T.ones_like(sv_tm1),
         T.ones_like(sv_tm1[:,-1:])],axis=1)
     # iterate over batch 
     mask,_ = theano.scan(fn=self._batchAlign,
             sequences=[w_t,mask],
             outputs_info=None)
     # mask the slot-value vector
     sv_t = mask[:,:-1] * sv_tm1
     return sv_t
Author: jtraviesor, Project: tf-playground, Lines: 11, Source: hlstm.py


Example 19: _step

        def _step(m_, x_, h_):
            preact = T.dot(h_, self.U)
            preact += x_

            z = T.nnet.sigmoid(_slice(preact, 0, hidden_size))
            r = T.nnet.sigmoid(_slice(preact, 1, hidden_size))
            c = T.tanh(_slice(preact, 2, hidden_size) * r + (T.ones_like(r) - r) * _slice(x_, 2, hidden_size))

            h = (T.ones_like(z) - z) * c + z * h_
            h = m_[:, None] * h + (1. - m_)[:, None] * h_

            return h
Author: Air-Fighter, Project: NN4Gaokao, Lines: 12, Source: layers.py


Example 20: gibbs_iteration

 def gibbs_iteration(g1, s1, h1, t1, v):
     if mean_field:
         g2 = self.g_given_htv(h1, t1, v) 
         s2 = self.s_given_ghtv(T.ones_like(g2), h1, t1, v)
         h2 = self.h_given_gsv(g2, s2, v)
         t2 = self.t_given_gshv(g2, s2, T.ones_like(h2), v)
     else:
         g2 = self.sample_g_given_htv(h1, t1, v) 
         s2 = self.sample_s_given_ghtv(g2, h1, t1, v)
         h2 = self.sample_h_given_gsv(g2, s2, v)
         t2 = self.sample_t_given_gshv(g2, s2, h2, v)
     return [g2, s2, h2, t2]
Author: gdesjardins, Project: hossrbm_public, Lines: 12, Source: hossrbm_gsht.py



Note: the theano.tensor.ones_like examples in this article were compiled from source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets were selected from open-source projects contributed by their respective developers, and copyright remains with the original authors; consult each project's license before reusing or redistributing the code. Please do not repost without permission.

