Python tensor.tanh Function Code Examples


This article collects typical usage examples of the theano.tensor.tanh function in Python. If you have been wondering what the tanh function does, how to call it, or what real-world usage looks like, the curated code examples below should help.



The following presents 20 code examples of the tanh function, ordered by popularity by default.
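
Before the examples, here is a minimal, self-contained sketch of the typical workflow (variable names are illustrative, not taken from any example below): declare a symbolic input, apply T.tanh element-wise, compile the graph with theano.function, and evaluate on NumPy data.

import numpy as np
import theano
import theano.tensor as T

x = T.dmatrix('x')           # symbolic float64 matrix (the name 'x' is illustrative)
y = T.tanh(x)                # element-wise tanh; outputs lie in (-1, 1)
f = theano.function([x], y)  # compile the symbolic graph into a callable

print(f(np.array([[0.0, 1.0], [-2.0, 3.0]])))
# approximately [[ 0.      0.7616]
#                [-0.9640  0.9951]]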

Example 1: __call__

    def __call__(self, x, h, prev_cell):
        z = x.dot(self.W_x) + h.dot(self.U_h) + self.b

        def _get_unit(matrix, unit, dim):
            slice_num = self.units[unit]
            # assume all slices have the same dimension
            return matrix[:, slice_num * dim: (slice_num + 1) * dim]

        # input gate
        i = T.nnet.sigmoid(_get_unit(z, 'i', self.unit_size))

        # candidate memory cell
        candidate = T.tanh(_get_unit(z, 'c', self.unit_size))

        # forget gate
        f = T.nnet.sigmoid(_get_unit(z, 'f', self.unit_size))

        # output gate (note it doesn't involve the current memory cell)
        o = T.nnet.sigmoid(_get_unit(z, 'o', self.unit_size))

        next_cell = i * candidate + f * prev_cell

        h = o * T.tanh(next_cell)

        return [next_cell, h]
Author: txd866 | Project: rl-curriculum | Lines: 25 | Source: layers.py
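
Example 1 is the canonical LSTM cell, and the same pattern recurs in most of the examples below: the pre-activation z is split into four equal slices, sigmoid gates three of them, and tanh squashes both the candidate cell and the output. In standard notation (with $\odot$ denoting element-wise multiplication), the code above computes:

$$i = \sigma(z_i), \qquad f = \sigma(z_f), \qquad o = \sigma(z_o), \qquad \tilde{c} = \tanh(z_c)$$
$$c_t = i \odot \tilde{c} + f \odot c_{t-1}$$
$$h_t = o \odot \tanh(c_t)$$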


Example 2: tanh_actfun

def tanh_actfun(x, scale=None):
    """Compute  rescaled tanh activation for x."""
    if scale is None:
        x_tanh = T.tanh(x)
    else:
        x_tanh = scale * T.tanh(constFX(1/scale) * x)
    return x_tanh
Author: Philip-Bachman | Project: Sequential-Generation | Lines: 7 | Source: NetLayers.py
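
A note on Example 2's rescaling: scale * tanh(x / scale) has unit slope at the origin, because the 1/scale inside cancels the scale outside, so it behaves like the identity for small inputs while saturating at ±scale rather than ±1. A quick NumPy check with illustrative values:

import numpy as np

scale = 5.0
x = np.array([0.1, 1.0, 10.0, 100.0])
print(scale * np.tanh(x / scale))
# approximately [0.1000  0.9869  4.8201  5.0000]
# near-identity for small x, capped near +/- scale for large x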


Example 3: generate

    def generate(self, h_, c_, x_):
        h_a = []
        c_a = []
        for it in range(self.n_levels):
            preact = T.dot(x_, self.W[it])
            preact += T.dot(h_[it], self.U[it]) + self.b[it]

            i = T.nnet.sigmoid(self.slice(preact, 0, self.n_dim))
            f = T.nnet.sigmoid(self.slice(preact, 1, self.n_dim))
            o = T.nnet.sigmoid(self.slice(preact, 2, self.n_dim))
            c = T.tanh(self.slice(preact, 3, self.n_dim))

            c = f * c_[it] + i * c
            h = o * T.tanh(c)

            h_a.append(h)
            c_a.append(c)

            x_ = h

        q = T.dot(h, self.L) + self.b0
        # mask = T.concatenate([T.alloc(np_floatX(1.), q.shape[0] - 1), T.alloc(np_floatX(0.), 1)])
        prob = T.nnet.softmax(q / 1)  # softmax at temperature 1 (the division is a no-op)

        return prob, T.stack(h_a).squeeze(), T.stack(c_a)[0].squeeze()
Author: velicue | Project: char-rnn-theano | Lines: 25 | Source: lstm.py


Example 4: step

  def step(self, i_t, x_t, z_t, att_p, y_p, c_p, *other_args):
    # See Unit.scan() for seqs.
    # args: seqs (x_t = unit.xc, z_t, i_t), outputs (# unit.n_act, y_p, c_p, ...), non_seqs (none)
    other_outputs = []
    #att_p = theano.printing.Print('att in lstms', attrs=['__str__'])(att_p)
    if self.recurrent_transform:
      state_vars = other_args[:len(self.recurrent_transform.state_vars)]
      self.recurrent_transform.set_sorted_state_vars(state_vars)
      z_r, r_updates = self.recurrent_transform.step(y_p)
      z_t += z_r
      for v in self.recurrent_transform.get_sorted_state_vars():
        other_outputs += [r_updates[v]]
    maxatt = att_p.repeat(z_t.shape[1]).reshape((z_t.shape[0],z_t.shape[1]))#.dimshuffle(1,0)
    #maxatt = theano.printing.Print('maxatt',attrs=['__str__','shape'])(maxatt)
    z_t = T.switch(maxatt>0,z_t,z_t + T.dot(y_p, self.W_re))
    #z_t += T.dot(y_p, self.W_re)
    #z_t = theano.printing.Print('z_t lstms',attrs=['shape'])(z_t)

    partition = z_t.shape[1] // 4
    ingate = T.nnet.sigmoid(z_t[:,:partition])
    forgetgate = ((T.nnet.sigmoid(z_t[:,partition:2*partition])).T * (1.-att_p)).T
    outgate = T.nnet.sigmoid(z_t[:,2*partition:3*partition])
    input = T.tanh(z_t[:,3*partition:4*partition])
    #c_t = ((forgetgate * c_p + ingate * input).T * (1.-T.max(att_p,axis=-1))).T
    c_t = forgetgate * c_p + ingate * input
    y_t = outgate * T.tanh(c_t)
    i_output = T.outer(i_t, self.o_output)
    i_h = T.outer(i_t, self.o_h)
    # return: next outputs (# unit.n_act, y_t, c_t, ...)
    return (y_t * i_output, c_t * i_h + c_p * (1 - i_h)) + tuple(other_outputs)
Author: rwth-i6 | Project: returnn | Lines: 30 | Source: NetworkRecurrentLayer.py


Example 5: recurrence

 def recurrence(x_t, c_tm1, h_tm1):
     i_t = T.nnet.sigmoid(T.dot(x_t, self.w_xi) + T.dot(h_tm1, self.w_hi) + self.b_i)  # + T.dot(c_tm1, self.w_ci)
     f_t = T.nnet.sigmoid(T.dot(x_t, self.w_xf) + T.dot(h_tm1, self.w_hf) + self.b_f)  # + T.dot(c_tm1, self.w_cf)
     c_t = f_t * c_tm1 + i_t * T.tanh(T.dot(x_t, self.w_xc) + T.dot(h_tm1, self.w_hc) + self.b_c)
     o_t = T.nnet.sigmoid(T.dot(x_t, self.w_xo) + T.dot(h_tm1, self.w_ho) + self.b_o)  # + T.dot(c_t, self.w_co)
     h_t = o_t * T.tanh(c_t)
     return [c_t, h_t]
Author: geshiming | Project: UltraDeep | Lines: 7 | Source: network.py


Example 6: step

    def step(x_t, m, h_tm1, c_tm1, ctx_t, att, pctx_):
        projected_state = T.dot(h_tm1, Wd_att)
        pctx_ = T.tanh(pctx_ + projected_state[None, :, :])
        new_att = T.dot(pctx_, U_att) + c_att
        new_att = new_att.reshape([new_att.shape[0], new_att.shape[1]])
        new_att = T.exp(new_att) * context_mask
        new_att = new_att / new_att.sum(axis=0, keepdims=True)
        # Current context
        ctx_t = (context * new_att[:, :, None]).sum(axis=0)

        preactivation = T.dot(h_tm1, U)
        preactivation += x_t
        preactivation += T.dot(ctx_t, Wc)

        i_t = T.nnet.sigmoid(_slice(preactivation, 0, hidden_size))
        f_t = T.nnet.sigmoid(_slice(preactivation, 1, hidden_size))
        o_t = T.nnet.sigmoid(_slice(preactivation, 2, hidden_size))
        c_t = T.tanh(_slice(preactivation, 3, hidden_size))

        c_t = f_t * c_tm1 + i_t * c_t
        c_t = m[:, None] * c_t + (1. - m)[:, None] * c_tm1
        h_t = o_t * T.tanh(c_t)
        h_t = m[:, None] * h_t + (1. - m)[:, None] * h_tm1
        return (h_t, c_t, ctx_t, new_att.T, projected_state,
                i_t, f_t, o_t, preactivation)
Author: nlkim0817 | Project: minet | Lines: 25 | Source: net.py


Example 7: forward_prop_step

        def forward_prop_step(x_t, s_t1_prev, s_t2_prev):
            ''' Inner function encapsulating a propagation step
            This is how we calculated the hidden state in a simple RNN. No longer!
            s_t = T.tanh(U[:,x_t] + W.dot(s_t1_prev))
            '''
            # Word embedding layer
            x_e = E[:,x_t]
            
            # GRU Layer 1
            z_t1 = T.nnet.hard_sigmoid(U[0].dot(x_e) + W[0].dot(s_t1_prev) + b[0])
            r_t1 = T.nnet.hard_sigmoid(U[1].dot(x_e) + W[1].dot(s_t1_prev) + b[1])
            c_t1 = T.tanh(U[2].dot(x_e) + W[2].dot(s_t1_prev * r_t1) + b[2])
            s_t1 = (T.ones_like(z_t1) - z_t1) * c_t1 + z_t1 * s_t1_prev
            
            # GRU Layer 2
            z_t2 = T.nnet.hard_sigmoid(U[3].dot(s_t1) + W[3].dot(s_t2_prev) + b[3])
            r_t2 = T.nnet.hard_sigmoid(U[4].dot(s_t1) + W[4].dot(s_t2_prev) + b[4])
            c_t2 = T.tanh(U[5].dot(s_t1) + W[5].dot(s_t2_prev * r_t2) + b[5])
            s_t2 = (T.ones_like(z_t2) - z_t2) * c_t2 + z_t2 * s_t2_prev
            
            # Final output calculation
            # Theano's softmax returns a matrix with one row, we only need the row
            o_t = T.nnet.softmax(V.dot(s_t2) + c)[0]

            return [o_t, s_t1, s_t2]
Author: BaluJr | Project: ProgramAnalysisProject | Lines: 25 | Source: gru_theano.py
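
For contrast with the LSTM cells elsewhere on this page, the GRU layers in Example 7 apply tanh only to the candidate state; z is the update gate and r the reset gate. Writing $U_z, U_r, U_c$ for the weight slices U[0], U[1], U[2] (and similarly for W and b), and with $\sigma$ standing for the hard sigmoid used in the code, each layer computes:

$$z_t = \sigma(U_z x + W_z s_{t-1} + b_z)$$
$$r_t = \sigma(U_r x + W_r s_{t-1} + b_r)$$
$$\tilde{s}_t = \tanh(U_c x + W_c (s_{t-1} \odot r_t) + b_c)$$
$$s_t = (1 - z_t) \odot \tilde{s}_t + z_t \odot s_{t-1}$$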


Example 8: _step

    def _step(x_, xb_, h_, c_, hb_, cb_):
        preact = T.dot(h_, tparams[_p(prefix, 'U')])
        preact += T.dot(x_, tparams[_p(prefix, 'W')])
        preact += tparams[_p(prefix, 'b')]

        i = T.nnet.sigmoid(_slice(preact, 0, options['dim_proj']))
        f = T.nnet.sigmoid(_slice(preact, 1, options['dim_proj']))
        o = T.nnet.sigmoid(_slice(preact, 2, options['dim_proj']))
        c = T.tanh(_slice(preact, 3, options['dim_proj']))

        c = f * c_ + i * c
        h = o * T.tanh(c)
        
        preactb = T.dot(hb_, tparams[_p(prefix, 'Ub')])
        preactb += T.dot(xb_, tparams[_p(prefix, 'Wb')])
        preactb += tparams[_p(prefix, 'bb')]

        ib = T.nnet.sigmoid(_slice(preactb, 0, options['dim_proj']))
        fb = T.nnet.sigmoid(_slice(preactb, 1, options['dim_proj']))
        ob = T.nnet.sigmoid(_slice(preactb, 2, options['dim_proj']))
        cb = T.tanh(_slice(preactb, 3, options['dim_proj']))

        cb = fb * cb_ + ib * cb
        hb = ob * T.tanh(cb)
        
        # take the reverse of hb and concatenate with h before feeding into logistic regression
        hhb = T.concatenate([h,hb[::-1]])
        # a single frame prediction given h - the posterior probability
        one_pred = T.nnet.softmax(T.dot(hhb, tparams['U']) + tparams['b'])
        
        return h, c, hb, cb, one_pred
Author: tangkk | Project: tangkk-mirex-ace | Lines: 31 | Source: blstmrnn.py


Example 9: theano_setup

    def theano_setup(self):
    
        # The matrices Wb and Wc were originally tied.
        # Because of that, I decided to keep Wb and Wc with
        # the same shape (instead of being transposed) to
        # avoid disturbing the code as much as possible.

        Wb = T.dmatrix('Wb')
        Wc = T.dmatrix('Wc')
        b = T.dvector('b')
        c = T.dvector('c')
        s = T.dscalar('s')
        x = T.dmatrix('x')
    
        h_act = T.dot(x, Wc) + c
        if self.act_func[0] == 'tanh':
            h = T.tanh(h_act)
        elif self.act_func[0] == 'sigmoid':
            h = T.nnet.sigmoid(h_act)
        elif self.act_func[0] == 'id':
            # bad idea
            h = h_act
        else:
            raise("Invalid act_func[0]")

        r_act = T.dot(h, Wb.T) + b
        if self.act_func[1] == 'tanh':
            r = s * T.tanh(r_act)
        elif self.act_func[1] == 'sigmoid':
            r = s * T.nnet.sigmoid(r_act)
        elif self.act_func[1] == 'id':
            r = s * r_act
        else:
            raise("Invalid act_func[1]")


        # Another variable to be able to call a function
        # with a noisy x and compare it to a reference x.
        y = T.dmatrix('y')

        loss = ((r - y)**2)
        sum_loss = T.sum(loss)
        
        # theano_encode_decode : vectorial function in argument X.
        # theano_loss : vectorial function in argument X.
        # theano_gradients : returns triplet of gradients, each of
        #                    which involves the all data X summed
        #                    so it's not a "vectorial" function.

        self.theano_encode_decode = function([Wb,Wc,b,c,s,x], r)
        self.theano_loss = function([Wb,Wc,b,c,s,x,y], loss)

        self.theano_gradients = function([Wb,Wc,b,c,s,x,y],
                                         [T.grad(sum_loss, Wb), T.grad(sum_loss, Wc),
                                          T.grad(sum_loss, b),  T.grad(sum_loss, c),
                                          T.grad(sum_loss, s)])
        # other useful theano functions for the experiments that involve
        # adding noise to the hidden states
        self.theano_encode = function([Wc,c,x], h)
        self.theano_decode = function([Wb,b,s,h], r)
Author: gyom | Project: denoising_autoencoder | Lines: 60 | Source: dae_untied_weights.py


Example 10: get_ht_ct

 def get_ht_ct(self, xWxi_t, xWxf_t, xWxc_t, xWxo_t, h_t1, c_t1):
     i_t = T.nnet.sigmoid(xWxi_t + h_t1.dot(self.Whi) + c_t1.dot(self.Wci) + self.bi)
     f_t = T.nnet.sigmoid(xWxf_t + h_t1.dot(self.Whf) + c_t1.dot(self.Wcf) + self.bf)
     c_t = f_t * c_t1 + i_t * T.tanh(xWxc_t + h_t1.dot(self.Whc) + self.bc)
     o_t = T.nnet.sigmoid(xWxo_t + h_t1.dot(self.Who) + c_t.dot(self.Wco) + self.bo)
     h_t = o_t * T.tanh(c_t)
     return h_t, c_t
Author: ShuvenduBikash | Project: machine_learning_examples | Lines: 7 | Source: batch_units.py


Example 11: forward_step

        def forward_step(x_t, prev_state, prev_content, prev_state_2, prev_content_2):
            input_gate = T.nnet.hard_sigmoid(T.dot((self.U_input), x_t) + T.dot(self.W_input, prev_state) + self.bias_input)
            forget_gate = T.nnet.hard_sigmoid(
                T.dot((self.U_forget), x_t) + T.dot(self.W_forget, prev_state) + self.bias_forget)
            output_gate = T.nnet.hard_sigmoid(
                T.dot((self.U_output), x_t) + T.dot(self.W_output, prev_state) + self.bias_output)

            stabilized_input = T.tanh(T.dot((self.U), x_t) + T.dot(self.W, prev_state) + self.bias)
            c = forget_gate * prev_content + input_gate * stabilized_input
            s1 = output_gate * T.tanh(c)

            input_gate_2 = T.nnet.hard_sigmoid(
                T.dot((self.U_input_2), s1) + T.dot(self.W_input_2, prev_state_2) + self.bias_input_2)
            forget_gate_2 = T.nnet.hard_sigmoid(
                T.dot((self.U_forget_2), s1) + T.dot(self.W_forget_2, prev_state_2) + self.bias_forget_2)
            output_gate_2 = T.nnet.hard_sigmoid(
                T.dot((self.U_output_2), s1) + T.dot(self.W_output_2, prev_state_2) + self.bias_output_2)

            stabilized_input_2 = T.tanh(T.dot((self.U_2), s1) + T.dot(self.W_2, prev_state_2) + self.bias_2)

            c2 = forget_gate_2 * prev_content_2 + input_gate_2 * stabilized_input_2

            s2 = output_gate_2 * T.tanh(c2)
            o = T.nnet.sigmoid(T.dot(self.O_w, s2) + self.O_bias)

            return [o, s1, c, s2, c2, input_gate, forget_gate, output_gate]
Author: samiraabnar | Project: LSTM | Lines: 26 | Source: LSTM2Layer.py


Example 12: _step

    def _step(self, y_tm1, s_tm1, h):

        # attention
        pctx__ = T.dot(h, self.W_ha) + T.dot(s_tm1, self.W_sa)

        # pctx__ += T.dot(y_t, self.W_yc)

        e = T.exp(T.tanh(pctx__))

        # e = T.dot(pctx__, self.U_z)

        e = e / e.sum(0, keepdims=True)

        c = T.dot(e.T, h)

        # c = (h * e[:, :, None]).sum(0)

        z = T.tanh(T.dot(y_tm1, self.W_z) + self.b_z + T.dot(s_tm1, self.U_z) + T.dot(c, self.W_cs))
        r = T.tanh(T.dot(y_tm1, self.W_r) + self.b_r + T.dot(s_tm1, self.U_r) + T.dot(c, self.W_cs))
        hh_t = T.tanh(T.dot(y_tm1, self.W_h) + self.b_h + T.dot(r * s_tm1, self.U_h) + T.dot(c, self.W_cy))
        s_t = z * s_tm1 + (1 - z) * hh_t

        logit = T.tanh(T.dot(s_t, self.W_hl) + T.dot(y_tm1, self.W_yl) + T.dot(c, self.W_cl))

        return T.cast(s_t, dtype=theano.config.floatX), logit
Author: chuckgu | Project: RNN | Lines: 26 | Source: Layers.py


Example 13: convolutional_model

def convolutional_model(X, w_1, w_2, w_3, w_4, w_5, w_6, p_1, p_2, p_3, p_4, p_5):
    l1 = dropout(T.tanh( max_pool_2d(T.maximum(conv2d(X, w_1, border_mode='full'),0.), (2, 2),ignore_border=True) + b_1.dimshuffle('x', 0, 'x', 'x') ), p_1)
    l2 = dropout(T.tanh( max_pool_2d(T.maximum(conv2d(l1, w_2), 0.), (2, 2),ignore_border=True) + b_2.dimshuffle('x', 0, 'x', 'x') ), p_2)
    l3 = dropout(T.flatten(T.tanh( max_pool_2d(T.maximum(conv2d(l2, w_3), 0.), (2, 2),ignore_border=True) + b_3.dimshuffle('x', 0, 'x', 'x') ), outdim=2), p_3)# flatten to switch back to 1d layers
    l4 = dropout(T.maximum(T.dot(l3, w_4), 0.), p_4)
    l5 = dropout(T.maximum(T.dot(l4, w_5), 0.), p_5)
    return T.dot(l5, w_6)
Author: mjasek114 | Project: w207-Kaggle-Facial-Keypoint-Detection | Lines: 7 | Source: ConvoNN_V3.py


Example 14: step

    def step(x, prev_cell, prev_hidden):
        transformed_x = T.dot(x, P[name_W_input])

        x_i = transformed_x[0 * hidden_size : 1 * hidden_size]
        x_f = transformed_x[1 * hidden_size : 2 * hidden_size]
        x_c = transformed_x[2 * hidden_size : 3 * hidden_size]
        x_o = transformed_x[3 * hidden_size : 4 * hidden_size]

        transformed_hid = T.dot(prev_hidden, P[name_W_hidden])
        h_i = transformed_hid[0 * hidden_size : 1 * hidden_size]
        h_f = transformed_hid[1 * hidden_size : 2 * hidden_size]
        h_c = transformed_hid[2 * hidden_size : 3 * hidden_size]
        h_o = transformed_hid[3 * hidden_size : 4 * hidden_size]

        transformed_cell = T.dot(prev_cell, V_if)
        c_i = transformed_cell[0 * hidden_size : 1 * hidden_size]
        c_f = transformed_cell[1 * hidden_size : 2 * hidden_size]

        in_lin = x_i + h_i + b_i + c_i
        forget_lin = x_f + h_f + b_f + c_f
        cell_lin = x_c + h_c + b_c

        in_gate = T.nnet.sigmoid(in_lin)
        forget_gate = T.nnet.sigmoid(forget_lin)
        cell_updates = T.tanh(cell_lin)

        cell = forget_gate * prev_cell + in_gate * cell_updates

        out_lin = x_o + h_o + b_o + T.dot(cell, V_o)
        out_gate = T.nnet.sigmoid(out_lin)

        hid = out_gate * T.tanh(cell)
        return cell, hid
Author: ml-lab | Project: neural-transducers | Lines: 33 | Source: lstm.py


Example 15: pass_edges

        def pass_edges(input_idx_t, edge_t, edge_mask_t, counter_t, h_tm1, c_tm1, x):
            h_t = h_tm1
            c_t = c_tm1
            # select the input vector to use for this edge (source)
            x_t_i = x[input_idx_t, :]
            # zero out the input unless this is a leaf node
            x_t_0 = T.switch(T.eq(T.sum(edge_mask_t), 0), x_t_i, x_t_i*0)
            # concatenate with the input edge vector
            x_t_edge = T.concatenate([x_t_0, edge_t])

            # compute attention weights, using a manual softmax
            attention_scores = T.dot(self.v_a, T.tanh(T.dot(self.W_h_a, h_tm1))) # (1, n_edges)
            # find the max of the unmasked values
            max_score = T.max(attention_scores + edge_mask_t * 10000.0) - 10000.0
            # exponentiate the differences, masking first to avoid inf, and then to keep only relevant scores
            exp_scores = T.exp((attention_scores - max_score) * edge_mask_t) * edge_mask_t
            # take the sum, and add one if the mask is all zeros to avoid an inf
            exp_scores_sum = T.sum(exp_scores) + T.switch(T.eq(T.sum(edge_mask_t), 0), 1.0, 0.0)
            # normalize to compute the weights
            weighted_mask = exp_scores / exp_scores_sum

            i_t = T.nnet.sigmoid(T.dot(x_t_edge, self.W_x_i) + T.sum(T.dot(self.W_h_i.T, (weighted_mask * h_tm1)).T, axis=0) + self.b_h_i)
            f_t = T.nnet.sigmoid(T.dot(x_t_edge, self.W_x_f) + T.sum(T.dot(self.W_h_f.T, (weighted_mask * h_tm1)).T, axis=0) + self.b_h_f)
            o_t = T.nnet.sigmoid(T.dot(x_t_edge, self.W_x_o) + T.sum(T.dot(self.W_h_o.T, (weighted_mask * h_tm1)).T, axis=0) + self.b_h_o)
            u_t = T.tanh(T.dot(x_t_edge, self.W_x_u) + T.sum(T.dot(self.W_h_u.T, (weighted_mask * h_tm1)).T, axis=0) + self.b_h_u)

            c_temp = i_t * u_t + f_t * T.sum((weighted_mask * c_tm1).T, axis=0)
            h_temp = o_t * T.tanh(c_temp)

            h_t = T.set_subtensor(h_t[:, counter_t], h_temp)
            c_t = T.set_subtensor(c_t[:, counter_t], c_temp)
            return h_t, c_t
Author: dallascard | Project: guac | Lines: 32 | Source: dytree.py
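
The hand-rolled softmax in Example 15 is the standard numerically stable formulation extended with a binary edge mask $m$: subtracting the maximum keeps every exponent non-positive, so exp cannot overflow, and the mask removes the scores of absent edges. Up to the all-zero-mask guard in the code (which adds 1 to the denominator to avoid dividing by zero), the attention weights are:

$$w_i = \frac{m_i \exp(a_i - \max_j a_j)}{\sum_k m_k \exp(a_k - \max_j a_j)}$$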


Example 16: step

	def step(x,prev_h,prev_c):
		input_gate = T.nnet.sigmoid(
				T.dot(x,P.W_input_in) +\
				T.dot(prev_h,P.W_hidden_in) +\
				T.dot(prev_c,P.W_cell_in) +\
				P.b_in
			)

		forget_gate = T.nnet.sigmoid(
				T.dot(x,P.W_input_forget) +\
				T.dot(prev_h,P.W_hidden_forget) +\
				T.dot(prev_c,P.W_cell_forget) +\
				P.b_forget
			)

		curr_c = forget_gate * prev_c + input_gate * T.tanh(
				T.dot(x,P.W_input_cell) +\
				T.dot(prev_h,P.W_hidden_cell) +\
				P.b_cell
			)

		output_gate = T.nnet.sigmoid(
				T.dot(x,P.W_input_output) +\
				T.dot(prev_h,P.W_hidden_output) +\
				T.dot(curr_c,P.W_cell_output) +\
				P.b_output
			)
		
		curr_h = output_gate * T.tanh(curr_c)

		return curr_h,curr_c
Author: andersonhaynes | Project: theano-nlp-1 | Lines: 31 | Source: lstm_lang_model.py


Example 17: fprop_step_mask

    def fprop_step_mask(self, state_below, mask, state_before, U):
        """
        Scan function for case using masks

        Parameters
        ----------
        : todo
        state_below : TheanoTensor
        """

        g_on = state_below + tensor.dot(state_before[:, :self.dim], U)
        i_on = tensor.nnet.sigmoid(g_on[:, :self.dim])
        f_on = tensor.nnet.sigmoid(g_on[:, self.dim:2*self.dim])
        o_on = tensor.nnet.sigmoid(g_on[:, 2*self.dim:3*self.dim])

        z = tensor.set_subtensor(state_before[:, self.dim:],
                                 f_on * state_before[:, self.dim:] +
                                 i_on * tensor.tanh(g_on[:, 3*self.dim:]))
        z = tensor.set_subtensor(z[:, :self.dim],
                                 o_on * tensor.tanh(z[:, self.dim:]))

        # Only update the state for non-masked data, otherwise
        # just carry on the previous state until the end
        z = mask[:, None] * z + (1 - mask[:, None]) * state_before

        return z
Author: dwf | Project: pylearn2 | Lines: 26 | Source: rnn.py


Example 18: b_step_lstm

 def b_step_lstm(x_t, h_tm1, c_tm1):
     i_t = T.nnet.sigmoid(T.dot(x_t, self.W_xi_b) + T.dot(h_tm1, self.W_hi_b) + self.b_i_b)
     f_t = T.nnet.sigmoid(T.dot(x_t, self.W_xf_b) + T.dot(h_tm1, self.W_hf_b) + self.b_f_b)
     c_t = f_t * c_tm1 + i_t * T.tanh(T.dot(x_t, self.W_xc_b) + T.dot(h_tm1, self.W_hc_b) + self.b_c_b)
     o_t = T.nnet.sigmoid(T.dot(x_t, self.W_xo_b) + T.dot(h_tm1, self.W_ho_b) + self.b_o_b)
     h_t = o_t * T.tanh(c_t)
     return [h_t, c_t]
Author: Seleucia | Project: RNNPose | Lines: 7 | Source: blstmnp.py


Example 19: step_fn

    def step_fn(current_input_to_state, prev_c, prev_h):
        # all args have shape (batch size, output_dim, height)

        # TODO consider learning this padding
        prev_h_padded = T.zeros((batch_size, output_dim, 1+height), dtype=theano.config.floatX)
        prev_h_padded = T.inc_subtensor(prev_h_padded[:,:,1:], prev_h)

        state_to_state = lib.ops.conv1d.Conv1D(
            name+'.StateToState', 
            output_dim, 
            4*output_dim, 
            2, 
            prev_h_padded, 
            biases=False
        )

        gates = current_input_to_state + state_to_state

        o_f_i = T.nnet.sigmoid(gates[:,:3*output_dim,:])
        o = o_f_i[:,0*output_dim:1*output_dim,:]
        f = o_f_i[:,1*output_dim:2*output_dim,:]
        i = o_f_i[:,2*output_dim:3*output_dim,:]
        g = T.tanh(gates[:,3*output_dim:4*output_dim,:])

        new_c = (f * prev_c) + (i * g)
        new_h = o * T.tanh(new_c)

        return (new_c, new_h)
Author: igul222 | Project: image_generation | Lines: 28 | Source: diagonal_bilstm.py


Example 20: _step

        def _step(c, c_m, hidden, c_matrix):
            node_idx = c[:, 0]
            left_child_idx = c[:, 1]
            right_child_idx = c[:, 2]

            all_samples = T.arange(n_samples)
            recursive = (
                T.dot(hidden[left_child_idx, all_samples, :], self.W)
                + T.dot(hidden[right_child_idx, all_samples, :], self.U)
                + self.b
            )

            i = T.nnet.sigmoid(_slice(recursive, 0, self.dim_proj))
            f1 = T.nnet.sigmoid(_slice(recursive, 1, self.dim_proj))
            f2 = T.nnet.sigmoid(_slice(recursive, 2, self.dim_proj))
            o = T.nnet.sigmoid(_slice(recursive, 3, self.dim_proj))
            c_prime = T.tanh(_slice(recursive, 4, self.dim_proj))

            new_c = (
                i * c_prime
                + f1 * c_matrix[left_child_idx, all_samples, :]
                + f2 * c_matrix[right_child_idx, all_samples, :]
            )

            new_c_masked = c_m[:, None] * new_c + (1.0 - c_m[:, None]) * c_matrix[node_idx, all_samples, :]

            new_h = o * T.tanh(new_c_masked)
            new_h_masked = c_m[:, None] * new_h + (1.0 - c_m[:, None]) * hidden[node_idx, all_samples, :]

            return (
                T.set_subtensor(hidden[node_idx, all_samples], new_h_masked),
                T.set_subtensor(c_matrix[node_idx, all_samples], new_c_masked),
            )
Author: zjh-nudger | Project: lstm_tree_theano_1 | Lines: 33 | Source: lstm.py



Note: the theano.tensor.tanh function examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are drawn from open-source projects contributed by many developers, and copyright remains with the original authors; consult each project's License before using or redistributing the code, and do not reproduce this article without permission.

