Python functional.sigmoid Function Code Examples


This article collects typical usage examples of Python's torch.nn.functional.sigmoid function. If you are wondering what the sigmoid function does, how to call it, or what real-world usage looks like, the curated code examples below should help.



The following presents 20 code examples of the sigmoid function, sorted by popularity by default.
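
As a warm-up, here is a minimal, self-contained demonstration of what F.sigmoid computes. Note that torch.nn.functional.sigmoid has since been deprecated in favor of torch.sigmoid; the examples below come from projects written before that change.

import torch
import torch.nn.functional as F

x = torch.tensor([-2.0, 0.0, 2.0])
y = F.sigmoid(x)  # elementwise 1 / (1 + exp(-x))
print(y)          # tensor([0.1192, 0.5000, 0.8808])
# Recent PyTorch versions emit a deprecation warning here;
# torch.sigmoid(x) is the drop-in replacement.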

Example 1: PeepholeLSTMCell

import torch
import torch.nn.functional as F
from typing import Tuple


def PeepholeLSTMCell(input: torch.Tensor,
                     hidden: Tuple[torch.Tensor, torch.Tensor],
                     w_ih: torch.Tensor,
                     w_hh: torch.Tensor,
                     w_ip: torch.Tensor,
                     w_fp: torch.Tensor,
                     w_op: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    An LSTM cell with peephole connections without biases.

    Mostly ripped from the pytorch autograd lstm implementation.
    """
    hx, cx = hidden
    gates = F.linear(input, w_ih) + F.linear(hx, w_hh)

    ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)
    peep_i = w_ip.unsqueeze(0).expand_as(cx) * cx
    ingate = ingate + peep_i
    peep_f = w_fp.unsqueeze(0).expand_as(cx) * cx
    forgetgate = forgetgate + peep_f

    ingate = F.sigmoid(ingate)
    forgetgate = F.sigmoid(forgetgate)
    cellgate = F.tanh(cellgate)
    cy = (forgetgate * cx) + (ingate * cellgate)
    peep_o = w_op.unsqueeze(0).expand_as(cy) * cy
    outgate = outgate + peep_o
    hy = outgate * F.tanh(cy)

    return hy, cy
Author: mittagessen, Project: kraken, Lines: 30, Source: layers.py
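
As a quick smoke test (not part of the original project; the sizes below are hypothetical), the cell can be exercised with random tensors:

import torch

# Batch 3, input size 10, hidden size 8; the gate tensor is
# chunked four ways, hence 4 * n_hid rows in the weight matrices.
batch, n_in, n_hid = 3, 10, 8
inp = torch.randn(batch, n_in)
hx, cx = torch.randn(batch, n_hid), torch.randn(batch, n_hid)
w_ih = torch.randn(4 * n_hid, n_in)   # input-to-hidden weights
w_hh = torch.randn(4 * n_hid, n_hid)  # hidden-to-hidden weights
w_ip, w_fp, w_op = (torch.randn(n_hid) for _ in range(3))  # peephole weights

hy, cy = PeepholeLSTMCell(inp, (hx, cx), w_ih, w_hh, w_ip, w_fp, w_op)
print(hy.shape, cy.shape)  # torch.Size([3, 8]) torch.Size([3, 8])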


Example 2: pointwise_loss

import torch.nn.functional as F


def pointwise_loss(positive_predictions, negative_predictions, mask=None):
    """
    Logistic loss function.

    Parameters
    ----------

    positive_predictions: tensor
        Tensor containing predictions for known positive items.
    negative_predictions: tensor
        Tensor containing predictions for sampled negative items.
    mask: tensor, optional
        A binary tensor used to zero the loss from some entries
        of the loss tensor.

    Returns
    -------

    loss, float
        The mean value of the loss function.
    """

    positives_loss = (1.0 - F.sigmoid(positive_predictions))
    negatives_loss = F.sigmoid(negative_predictions)

    loss = (positives_loss + negatives_loss)

    if mask is not None:
        mask = mask.float()
        loss = loss * mask
        return loss.sum() / mask.sum()

    return loss.mean()
Author: AlexMikhalev, Project: spotlight, Lines: 33, Source: losses.py
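
A usage sketch with random scores (not from the Spotlight test suite), assuming the import above:

import torch

pos = torch.randn(5)   # scores for observed positive items
neg = torch.randn(5)   # scores for sampled negative items
print(pointwise_loss(pos, neg))             # scalar mean loss
mask = torch.tensor([1.0, 1.0, 0.0, 1.0, 0.0])
print(pointwise_loss(pos, neg, mask=mask))  # mean over unmasked entries only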


Example 3: forward

    def forward(self, x):
        x1 = self.inc(x)
        x2 = self.down1(x1)
        x3 = self.down2(x2)
        LocOut = self.LocTop(x3)
        LocOut = F.softmax(LocOut)
        RoIs = self.Localization(LocOut, Train=False)

        P_Region = []
        P_Contour = []
        for i in range(len(RoIs)):
            Zstart = RoIs[i][0]
            Ystart = RoIs[i][1]
            Xstart = RoIs[i][2]
            Zend = RoIs[i][3]
            Yend = RoIs[i][4]
            Xend = RoIs[i][5]
            # RoI cropping layer: crop each scale of the feature pyramid
            # (note the anisotropic scale factor on the Z axis of x1)
            x3_RoI = x3[:, :, Zstart:Zend, Ystart:Yend, Xstart:Xend]
            x2_RoI = x2[:, :, Zstart*2:Zend*2, Ystart*2:Yend*2, Xstart*2:Xend*2]
            x1_RoI = x1[:, :, Zstart*2:Zend*2, Ystart*4:Yend*4, Xstart*4:Xend*4]

            p = self.up1(x3_RoI, x2_RoI)
            p = self.up2(p, x1_RoI)
            p_r = self.SegTop1(p)
            p_r = F.sigmoid(p_r)
            p_r = p_r.to('cpu').detach().numpy()
            P_Region.append(p_r)

            p_c = self.SegTop2(p)
            p_c = F.sigmoid(p_c)
            p_c = p_c.to('cpu').detach().numpy()
            P_Contour.append(p_c)

        return P_Region, P_Contour, RoIs
Author: Wulingtian, Project: 3D-RU-Net, Lines: 35, Source: Step3_3D_RU_Net_Train.py


Example 4: norm_flow

    def norm_flow(self, params, z, v, logposterior):

        h = F.tanh(params[0][0](z))
        mew_ = params[0][1](h)
        sig_ = F.sigmoid(params[0][2](h)+5.) #[PB,Z]


        z_reshaped = z.view(self.P, self.B, self.z_size)

        gradients = torch.autograd.grad(outputs=logposterior(z_reshaped), inputs=z_reshaped,
                          grad_outputs=self.grad_outputs,
                          create_graph=True, retain_graph=True, only_inputs=True)[0]
        gradients = gradients.detach()

        gradients = gradients.view(-1,self.z_size)


        v = v*sig_ + mew_*gradients

        logdet = torch.sum(torch.log(sig_), 1)


        h = F.tanh(params[1][0](v))
        mew_ = params[1][1](h)
        sig_ = F.sigmoid(params[1][2](h)+5.) #[PB,Z]

        z = z*sig_ + mew_*v

        logdet2 = torch.sum(torch.log(sig_), 1)

        #[PB]
        logdet = logdet + logdet2
        
        #[PB,Z], [PB]
        return z, v, logdet
Author: chriscremer, Project: Other_Code, Lines: 35, Source: approx_posteriors_v6.py


Example 5: forward

    def forward(self, title, pg):

        r_gate = F.sigmoid(self.wrx(title) + self.wrh(pg))
        i_gate = F.sigmoid(self.wix(title) + self.wih(pg))
        n_gate = F.tanh(self.wnx(title) + torch.mul(r_gate, self.wnh(pg)))
        result = torch.mul(i_gate, pg) + torch.mul(torch.add(-i_gate, 1), n_gate)
        return result
Author: shruthi0898, Project: Writing-editing-Network, Lines: 7, Source: DecoderRNNFB.py


Example 6: forward

    def forward(self, x):
        # reshape input first with batch size tracked
        x = x.view(x.size(0), -1)
        # use required layers
        x = self.dropout(F.sigmoid(self.fc1(x)))
        x = self.dropout(F.sigmoid(self.fc2(x)))
        x = F.sigmoid(self.fc3(x))
        x = self.fc4(x)
        return x
Author: ikhlestov, Project: caltech-ml-courses, Lines: 9, Source: model_one.py


Example 7: forward

    def forward(self, xt, img_fc, state):

        hs = []
        cs = []
        for L in range(self.num_layers):
            # c,h from previous timesteps
            prev_h = state[0][L]
            prev_c = state[1][L]
            # the input to this layer
            if L == 0:
                x = xt
                i2h = self.w2h(x) + self.v2h(img_fc)
            else:
                x = hs[-1]
                x = F.dropout(x, self.drop_prob_lm, self.training)
                i2h = self.i2h[L-1](x)

            all_input_sums = i2h+self.h2h[L](prev_h)

            sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
            sigmoid_chunk = F.sigmoid(sigmoid_chunk)
            # decode the gates
            in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
            forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
            out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
            # decode the write inputs
            if not self.use_maxout:
                in_transform = F.tanh(all_input_sums.narrow(1, 3 * self.rnn_size, self.rnn_size))
            else:
                in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size)
                in_transform = torch.max(\
                    in_transform.narrow(1, 0, self.rnn_size),
                    in_transform.narrow(1, self.rnn_size, self.rnn_size))
            # perform the LSTM update
            next_c = forget_gate * prev_c + in_gate * in_transform
            # gated cells form the output
            tanh_next_c = F.tanh(next_c)
            next_h = out_gate * tanh_next_c
            if L == self.num_layers-1:
                if L == 0:
                    i2h = self.r_w2h(x) + self.r_v2h(img_fc)
                else:
                    i2h = self.r_i2h(x)
                n5 = i2h+self.r_h2h(prev_h)
                fake_region = F.sigmoid(n5) * tanh_next_c

            cs.append(next_c)
            hs.append(next_h)

        # set up the decoder
        top_h = hs[-1]
        top_h = F.dropout(top_h, self.drop_prob_lm, self.training)
        fake_region = F.dropout(fake_region, self.drop_prob_lm, self.training)

        state = (torch.cat([_.unsqueeze(0) for _ in hs], 0), 
                torch.cat([_.unsqueeze(0) for _ in cs], 0))
        return top_h, fake_region, state
Author: nagizeroiw, Project: ImageCaptioning.pytorch, Lines: 57, Source: AttModel.py


Example 8: forward

    def forward(self, x):

        emb = self.emb(x).unsqueeze(1)  # batch_size * 1 * seq_len * emb_dim
        convs = [F.relu(conv(emb)).squeeze(3) for conv in self.convs]  # [batch_size * num_filter * length]
        pools = [F.max_pool1d(conv, conv.size(2)).squeeze(2) for conv in convs] # [batch_size * num_filter]
        pred = torch.cat(pools, 1)  # batch_size * num_filters_sum
        highway = self.highway(pred)
        pred = F.sigmoid(highway) *  F.relu(highway) + (1. - F.sigmoid(highway)) * pred
        pred = self.softmax(self.lin(self.dropout(pred)))
        return pred
Author: minizhao, Project: GAN-for-text, Lines: 10, Source: dis_model.py
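
The line combining sigmoid and relu above is a highway-network gate. A standalone illustration of that gating pattern (not from the original project): t = sigmoid(h) decides how much of the transformed signal F.relu(h) passes through, versus how much of the untransformed input is carried over.

import torch
import torch.nn.functional as F

h = torch.randn(4)                     # pre-gate activations
x = torch.randn(4)                     # carry path (the untransformed input)
t = F.sigmoid(h)                       # transform gate in (0, 1)
out = t * F.relu(h) + (1.0 - t) * x    # blend transform and carry paths
print(out)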


Example 9: forward

    def forward(self, inputs, future=0):
        outputs = []

        hids = []
        states = []
        for _ in range(self.num_hid_layers):
            hids.append(Variable(torch.zeros(1, self.hidden_size).double(), requires_grad=False))
            states.append(Variable(torch.zeros(1, self.hidden_size).double(), requires_grad=False))
            
        # h_t = Variable(torch.zeros(1, self.hidden_size).double(), requires_grad=False)
        # c_t = Variable(torch.zeros(1, self.hidden_size).double(), requires_grad=False)
        # h_t2 = Variable(torch.zeros(1, self.hidden_size).double(), requires_grad=False)
        # c_t2 = Variable(torch.zeros(1, self.hidden_size).double(), requires_grad=False)

        # for i, input_t in enumerate(inputs.chunk(inputs.size(1), dim=1)):
        # print(inputs.size())
        for i in range(inputs.size(0)):
            input_t = inputs[i, :]
            # print(input_t.size())
            input_t = F.sigmoid(self.layers['sigmoid'](input_t))
            h = 0
            val = input_t
            for k in self.layers:
                if k != 'linear' and k != 'sigmoid':
                    hids[h], states[h] = self.layers[k](val, (hids[h], states[h]))
                    val = hids[h]
                    h += 1
            # h_t, c_t = self.lstm1(input_t, (h_t, c_t))
            # h_t2, c_t2 = self.lstm2(h_t, (h_t2, c_t2))
            # h_t = self.gru1(input_t, h_t)
            # h_t2 = self.gru2(h_t, h_t2)
            # print(h_t2.size())
            output = self.layers['linear'](hids[-1])
            outputs += [output]

        # print(output)
        for i in range(future):
            # h_t, c_t = self.lstm1(output, (h_t, c_t))
            # h_t2, c_t2 = self.lstm2(h_t, (h_t2, c_t2))
            output = F.sigmoid(self.layers['sigmoid'](output))
            h = 0
            val = output
            for k in self.layers:
                if k != 'linear' and k != 'sigmoid':
                    hids[h], states[h] = self.layers[k](val, (hids[h], states[h]))
                    val = hids[h]
                    h += 1
            # h_t = self.gru1(input_t, h_t)
            # h_t2 = self.gru2(h_t, h_t2)
            # output = self.linear(h_t2)
            output = self.layers['linear'](hids[-1])
            outputs += [output]

        outputs = torch.stack(outputs, 0).squeeze(2)
        return outputs
Author: zimmerman-cole, Project: esn_experiments, Lines: 55, Source: rnn.py


Example 10: LSTMCell

import torch.nn.functional as F


def LSTMCell(input, hidden, w_ih, w_hh, b_ih=None, b_hh=None):
    hx, cx = hidden
    gates = F.linear(input, w_ih, b_ih) + F.linear(hx, w_hh, b_hh)

    ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)
    ingate = F.sigmoid(ingate)
    forgetgate = F.sigmoid(forgetgate)
    cellgate = F.tanh(cellgate)
    outgate = F.sigmoid(outgate)

    cy = (forgetgate * cx) + (ingate * cellgate)
    hy = outgate * F.tanh(cy)
    return hy, cy
Author: Northrend, Project: pytorch, Lines: 13, Source: test_jit.py
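
A smoke test with biases this time (hypothetical sizes, not from the original test file):

import torch

inp = torch.randn(2, 6)
hx, cx = torch.randn(2, 4), torch.randn(2, 4)
w_ih, w_hh = torch.randn(16, 6), torch.randn(16, 4)  # 4 gates * hidden size 4
b_ih, b_hh = torch.randn(16), torch.randn(16)
hy, cy = LSTMCell(inp, (hx, cx), w_ih, w_hh, b_ih, b_hh)
print(hy.shape, cy.shape)  # torch.Size([2, 4]) torch.Size([2, 4])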


Example 11: forward

    def forward(self, x1, x2):
        x1 = F.dropout(F.relu(self.layer1_1(x1.view(-1, 784))), self.drop)
        x2 = F.dropout(F.relu(self.layer1_2(x2.view(-1, 784))), self.drop)

        x = F.dropout(F.relu(self.layer2(torch.cat((x1, x2), 1))), self.drop)
        x = F.dropout(F.relu(self.layer3(x)), self.drop)
        x = F.dropout(F.relu(self.layer4(x)), self.drop)

        out1 = F.relu(self.layer5_1(x))
        out1 = F.sigmoid(self.layer6_1(out1))
        out2 = F.relu(self.layer5_2(x))
        out2 = F.sigmoid(self.layer6_2(out2))

        return out1, out2
Author: joshicha, Project: VIGAN, Lines: 14, Source: networks.py


Example 12: step

    def step(self, real_data, verbose: bool = False):
        batch_size = real_data.shape[0]

        real_dis_logit, real_hidden = self.model.dis(real_data)

        latent = self.model.sample_latent(batch_size)

        fake_data = self.model.gen(latent)
        fake_dis_logit, fake_hidden = self.model.dis(fake_data.detach())
        dis_loss = self.loss_type.discriminator_loss(real_dis_logit, fake_dis_logit)
        if self.penalty is not None:
            dis_penalty, grad_norm = self.penalty.penalty(self.model.dis, real_data, fake_data)
        else:
            dis_penalty = 0.
            grad_norm = None

        self.dis_opt.zero_grad()
        (dis_loss + dis_penalty).backward(retain_graph=True)
        self.dis_opt.step()

        fake_dis_logit, fake_hidden = self.model.dis(fake_data)
        gen_loss = self.loss_type.generator_loss(fake_dis_logit)

        self.gen_opt.zero_grad()
        gen_loss.backward(retain_graph=True)
        self.gen_opt.step()

        info_loss = self._information_loss(self.model, fake_hidden, latent)  # type: torch.Tensor
        info_loss *= self.info_weight

        self.gen_opt.zero_grad()
        self.dis_opt.zero_grad()
        self.rec_opt.zero_grad()
        info_loss.backward()
        self.gen_opt.step()
        self.dis_opt.step()
        self.rec_opt.step()

        if verbose:
            real_dis = F.sigmoid(real_dis_logit)
            fake_dis = F.sigmoid(fake_dis_logit)
            text = (f"D_loss = {dis_loss.item():.4f}, "
                    f"G_loss = {gen_loss.item():.4f}, "
                    f"MI = {info_loss.item():.4f}, "
                    f"D(x) = {real_dis.mean().item():.4f}, "
                    f"D(G(z)) = {fake_dis.mean().item():.4f}")
            if self.penalty is not None:
                text += f", |grad D| = {grad_norm.item():.4f}"
            print(text)
Author: dccastro, Project: Morpho-MNIST, Lines: 49, Source: infogan.py


Example 13: forward

    def forward(self, x, k=1):
        
        self.B = x.size()[0]
        mu, logvar = self.encode(x)
        z, logpz, logqz = self.sample(mu, logvar, k=k)  #[P,B,Z]
        x_hat = self.decode(z)  #[PB,X]
        x_hat = x_hat.view(k, self.B, -1)
        # print x_hat.size()
        # print x_hat.size()
        # print x.size()
        logpx = log_bernoulli(x_hat, x)  #[P,B]

        elbo = logpx + logpz - logqz  #[P,B]

        if k>1:
            max_ = torch.max(elbo, 0)[0] #[B]
            elbo = torch.log(torch.mean(torch.exp(elbo - max_), 0)) + max_ #[B]

        elbo = torch.mean(elbo) #[1]

        #for printing
        logpx = torch.mean(logpx)
        logpz = torch.mean(logpz)
        logqz = torch.mean(logqz)
        self.x_hat_sigmoid = F.sigmoid(x_hat)

        return elbo, logpx, logpz, logqz
Author: chriscremer, Project: Other_Code, Lines: 27, Source: vae_deconv.py


Example 14: forward

    def forward(self, xt, fc_feats, att_feats, p_att_feats, state):
        # The p_att_feats here is already projected
        att_size = att_feats.numel() // att_feats.size(0) // self.att_feat_size
        att = p_att_feats.view(-1, att_size, self.att_hid_size)
        
        att_h = self.h2att(state[0][-1])                        # batch * att_hid_size
        att_h = att_h.unsqueeze(1).expand_as(att)            # batch * att_size * att_hid_size
        dot = att + att_h                                   # batch * att_size * att_hid_size
        dot = F.tanh(dot)                                # batch * att_size * att_hid_size
        dot = dot.view(-1, self.att_hid_size)               # (batch * att_size) * att_hid_size
        dot = self.alpha_net(dot)                           # (batch * att_size) * 1
        dot = dot.view(-1, att_size)                        # batch * att_size
        
        weight = F.softmax(dot)                             # batch * att_size
        att_feats_ = att_feats.view(-1, att_size, self.att_feat_size) # batch * att_size * att_feat_size
        att_res = torch.bmm(weight.unsqueeze(1), att_feats_).squeeze(1) # batch * att_feat_size

        all_input_sums = self.i2h(xt) + self.h2h(state[0][-1])
        sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
        sigmoid_chunk = F.sigmoid(sigmoid_chunk)
        in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
        forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
        out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)

        in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size) + \
            self.a2c(att_res)
        in_transform = torch.max(\
            in_transform.narrow(1, 0, self.rnn_size),
            in_transform.narrow(1, self.rnn_size, self.rnn_size))
        next_c = forget_gate * state[1][-1] + in_gate * in_transform
        next_h = out_gate * F.tanh(next_c)

        output = self.dropout(next_h)
        state = (next_h.unsqueeze(0), next_c.unsqueeze(0))
        return output, state
Author: littlebadRobot, Project: AI_challenger_Chinese_Caption, Lines: 35, Source: Att2inModel.py


Example 15: get_probs_and_logits

def get_probs_and_logits(ps=None, logits=None, is_multidimensional=True):
    """
    Convert probability values to logits, or vice-versa. Either ``ps`` or
    ``logits`` should be specified, but not both.

    :param ps: tensor of probabilities. Should be in the interval *[0, 1]*.
        If ``is_multidimensional = True``, they must be normalized along
        axis -1.
    :param logits: tensor of logit values.  For the multidimensional case,
        the values, when exponentiated along the last dimension, must sum
        to 1.
    :param is_multidimensional: determines the computation of ps from logits,
        and vice-versa. For the multi-dimensional case, logit values are
        assumed to be log probabilities, whereas for the uni-dimensional case,
        it specifically refers to log odds.
    :return: tuple containing raw probabilities and logits as tensors.
    """
    assert (ps is None) != (logits is None)
    if ps is not None:
        eps = _get_clamping_buffer(ps)
        ps_clamped = ps.clamp(min=eps, max=1 - eps)
    if is_multidimensional:
        if ps is None:
            ps = softmax(logits, -1)
        else:
            logits = torch.log(ps_clamped)
    else:
        if ps is None:
            ps = F.sigmoid(logits)
        else:
            logits = torch.log(ps_clamped) - torch.log1p(-ps_clamped)
    return ps, logits
Author: Magica-Chen, Project: pyro, Lines: 32, Source: util.py
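
For the uni-dimensional branch, the logits are log odds, and the sigmoid round trip can be checked directly (a standalone sketch, not from pyro):

import torch
import torch.nn.functional as F

ps = torch.tensor([0.1, 0.5, 0.9])
logits = torch.log(ps) - torch.log1p(-ps)  # log odds, log(p / (1 - p))
print(F.sigmoid(logits))                   # recovers tensor([0.1000, 0.5000, 0.9000])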


Example 16: predict_mask

    def predict_mask(self, x):
        # print (x.size())  #[B,32,480,640]
        x = self.act_func(self.conv1_gp(x))  #[B,32,59,79]
        # print (x.size())
        x = self.act_func(self.conv2_gp(x)) #[B,64,13,18]
        # print (x.size())
        x = self.act_func(self.conv3_gp(x)) #[B,32,6,8]
        # print (x.size())
        

        # print (x.size())
        x = x.view(-1, self.intermediate_size)
        h1 = self.act_func(self.fc1_gp(x))
        z = self.fc2_gp(h1)
        z = self.act_func(self.fc3_gp(z)) 
        z = self.act_func(self.fc4_gp(z))  #[B,11264]
        z = z.view(-1, 32, 6, 8)
        z = self.act_func(self.deconv1_gp(z)) #[B,64,13,18]
        # print (z.size())
        z = self.act_func(self.deconv2_gp(z)) #[B,64,59,79]
        # print (z.size())
        z = self.deconv3_gp(z) # [B,1,452,580]
        # print (z.size())

        return F.sigmoid(z)
Author: chriscremer, Project: Other_Code, Lines: 26, Source: learn_mask_multDQN_biasframe.py


Example 17: _call

    def _call(self, x):
        shape = x.shape[:-1] + (1 + x.shape[-1],)
        one = x.new([1]).expand(x.shape[:-1] + (1,))
        numer = sigmoid(x)
        denom = (1 - numer).cumprod(-1)
        probs = torch.cat([numer, one], -1) * torch.cat([one, denom], -1)
        return probs
Author: MaheshBhosale, Project: pytorch, Lines: 7, Source: transforms.py
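
This is a stick-breaking construction: each sigmoid gives the fraction of the remaining probability "stick" assigned to the next category, so the output is a valid probability vector. A standalone check (using torch.sigmoid, since the bare sigmoid in the snippet comes from the module's own imports):

import torch

x = torch.randn(2, 4)                    # unconstrained inputs
numer = torch.sigmoid(x)                 # fraction of the remaining stick
one = torch.ones(x.shape[:-1] + (1,))
denom = (1 - numer).cumprod(-1)          # stick remaining after each break
probs = torch.cat([numer, one], -1) * torch.cat([one, denom], -1)
print(probs.sum(-1))                     # each row sums to 1.0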


Example 18: eval_by_batch

    def eval_by_batch(self, Xi, Xv, y, x_size):
        total_loss = 0.0
        y_pred = []
        if self.use_ffm:
            batch_size = 16384 * 2
        else:
            batch_size = 16384
        batch_iter = x_size // batch_size
        criterion = F.binary_cross_entropy_with_logits
        model = self.eval()
        for i in range(batch_iter + 1):
            offset = i * batch_size
            end = min(x_size, offset + batch_size)
            if offset == end:
                break
            batch_xi = Variable(torch.LongTensor(Xi[offset:end]))
            batch_xv = Variable(torch.FloatTensor(Xv[offset:end]))
            batch_y = Variable(torch.FloatTensor(y[offset:end]))
            if self.use_cuda:
                batch_xi, batch_xv, batch_y = batch_xi.cuda(), batch_xv.cuda(), batch_y.cuda()
            outputs = model(batch_xi, batch_xv)
            pred = F.sigmoid(outputs).cpu()
            y_pred.extend(pred.data.numpy())
            loss = criterion(outputs, batch_y)
            total_loss += loss.data[0] * (end - offset)
        total_metric = self.eval_metric(y, y_pred)
        return total_loss / x_size, total_metric
Author: lionkt-competition, Project: dnn_ctr, Lines: 27, Source: DIN.py


Example 19: forward

    def forward(self, frame, policies):
        # x: [B,2,84,84]
        self.B = frame.size()[0]
        

        #Predict mask
        pre_mask = self.predict_mask_nosigmoid(frame)
        mask = F.sigmoid(pre_mask)

        masked_frame = frame * mask
        kls = []
        for i in range(len(policies)):
            policy = policies[i]

            log_dist_mask = policy.action_logdist(masked_frame)
            log_dist_true = policy.action_logdist(frame)

            action_dist_kl = torch.sum((log_dist_true - log_dist_mask)*torch.exp(log_dist_true), dim=1) #[B]
            action_dist_kl = torch.mean(action_dist_kl) # * 1000
            kls.append(action_dist_kl)

        kls = torch.stack(kls)  # [policies]; each entry is already averaged over its batch
        action_dist_kl = torch.mean(kls)  # [1], averaged over batch and over policies

        pre_mask = pre_mask.view(self.B, -1)
        mask_cost = torch.abs(pre_mask + 20)
        # mask_sum = torch.mean(torch.sum(mask_cost, dim=1)) * .00001
        # mask_cost = torch.mean(mask_cost) * .00001
        mask_cost = torch.mean(mask_cost) * .01

        loss = action_dist_kl + mask_cost

        return loss, action_dist_kl, mask_cost
Author: chriscremer, Project: Other_Code, Lines: 33, Source: learn_to_mask_amortized_vae_policies.py


Example 20: adpW

    def adpW(self, x):
        '''
        Calculate the pairwise attention of every pair of inputs.
        Output size: (x.size(0), x.size(1) // 2)
        '''
        x = x.detach()
        x = self.adp_metric_embedding1(x)
        x = self.adp_metric_embedding1_bn(x)
        x = F.relu(x)
        x = self.adp_metric_embedding2(x)
        x = self.adp_metric_embedding2_bn(x)
        x = F.relu(x)
        x = self.adp_metric_embedding3(x)
        x = self.adp_metric_embedding3_bn(x)
        x = F.relu(x)
        pairwise_att = F.sigmoid(self.adp_metric_embedding4(x))
        half = x.size(1) // 2  # integer division (x.size(1)/2 is a float in Python 3)
        diag_matrix1 = []
        diag_matrix2 = []
        for i in range(x.size(0)):
            diag_matrix1.append(torch.diag(pairwise_att[i, :half]))
        for i in range(x.size(0)):
            diag_matrix2.append(torch.diag(pairwise_att[i, half:]))
        pairwise_att1 = torch.stack(diag_matrix1)
        pairwise_att2 = torch.stack(diag_matrix2)  # bug fix: the original stacked diag_matrix1 twice
        return pairwise_att1, pairwise_att2
Author: hh23333, Project: FVAE_adversarial, Lines: 26, Source: adaptive_triplet.py



Note: the torch.nn.functional.sigmoid examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by their authors; copyright remains with the original authors, and any use or redistribution should follow the license of the corresponding project. Do not republish without permission.

