
Python torch.multinomial Function Code Examples


This article collects typical usage examples of the torch.multinomial function in Python. If you have been struggling with questions like what torch.multinomial does, how to call it, or what real-world uses look like, the curated code examples below should help.



Below are 20 code examples of the multinomial function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the site recommend better Python code examples.
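
Before the examples, here is a minimal sketch of the function itself (not taken from any of the projects below): torch.multinomial(input, num_samples, replacement=False) treats each row of input as non-negative, not necessarily normalized weights and returns the sampled indices.

import torch

weights = torch.tensor([0.1, 0.3, 0.6])    # weights need not sum to 1
idx = torch.multinomial(weights, 2)        # 2 distinct indices (no replacement)
idx_rep = torch.multinomial(weights, 5, replacement=True)  # indices may repeat

batch = torch.tensor([[1.0, 9.0], [9.0, 1.0]])
print(torch.multinomial(batch, 1))         # one index per row, shape (2, 1)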

Example 1: sample

 def sample(self, batch_size, seq_len, data=None):
     """Sample one batch of sequences.

     data is an existing (partial) sequence to condition on, if given.
     """
     # No data: generate every position from scratch.
     if data is None:
         sample_batch = torch.zeros(batch_size, seq_len).type(torch.LongTensor).cuda()
         inp = Variable(torch.zeros(batch_size, 1).type(torch.LongTensor)).cuda()
         h = self.init_hidden(batch_size)
         for i in range(seq_len):
             output, h = self.forward(inp, h)
             output = torch.multinomial(output.exp().squeeze(), 1)
             sample_batch[:, i] = output.data.view(-1)  # flatten (B, 1) -> (B,)
             inp = output
         return sample_batch
     # Otherwise, feed the given prefix and sample the remaining positions.
     else:
         sample_batch = torch.zeros(batch_size, seq_len).type(torch.LongTensor).cuda()
         inp = Variable(torch.zeros(batch_size, 1).type(torch.LongTensor)).cuda()
         h = self.init_hidden(batch_size)
         for i in range(seq_len):
             if i < data.size(1):
                 inp = data[:, i].unsqueeze(1)
             else:
                 inp = sample_batch[:, i - 1].unsqueeze(1)
             output, h = self.forward(inp, h)
             output = torch.multinomial(output.exp().squeeze(), 1)
             sample_batch[:, i] = output.data.view(-1)  # flatten (B, 1) -> (B,)
         return sample_batch
Author: minizhao | Project: GAN-for-text | Lines: 30 | Source: gen_model.py


Example 2: blue_eval

def blue_eval(output, corpus):
    # after sampling, the shape is 20 x 19
    sent_idx = torch.multinomial(output.exp().cpu(), 1).view(-1, 19)
    sent_idx = sent_idx.cpu().data.numpy()
    sent_str = []

    # post-process one generated batch of sentences
    for i in range(sent_idx.shape[0]):
        str_ = [str(int(x)) for x in sent_idx[i, :-1]]
        sent_str.append(str_)

    eval_data=[]
    for sent in corpus.valid.numpy():
        eval_data.append([str(int(x)) for x in sent[1:-1]])

    weight = tuple((1. / 4 for _ in range(4)))
    BLEUscores=[]

    for gen_sent in sent_str:
        ref_sent_info=[]
        for ref_sent in eval_data:
            # find the reference sentences most similar to the generated one
            common_tokens = Counter(gen_sent) & Counter(ref_sent)
            correct_preds = sum(common_tokens.values())
            recall_wrt = float(correct_preds) / len(gen_sent)
            ref_sent_info.append((ref_sent,recall_wrt))

        ref_sent_info.sort(key=lambda x: -x[1])
        top_refs=[x[0] for x in ref_sent_info[:50]]

        BLEUscore = nltk.translate.bleu_score.sentence_bleu(top_refs, gen_sent, weight)
        BLEUscores.append(BLEUscore)

    score = np.mean(BLEUscores)
    return score
Author: minizhao | Project: GAN-for-text | Lines: 35 | Source: eval.py


Example 3: forward

    def forward(self, fc_feats, att_feats, seq):
        batch_size = fc_feats.size(0)
        state = self.init_hidden(batch_size)
        outputs = []

        for i in range(seq.size(1)):
            if i == 0:
                xt = self.img_embed(fc_feats)
            else:
                if self.training and i >= 2 and self.ss_prob > 0.0: # otherwise no need to sample
                    sample_prob = fc_feats.data.new(batch_size).uniform_(0, 1)
                    sample_mask = sample_prob < self.ss_prob
                    if sample_mask.sum() == 0:
                        it = seq[:, i-1].clone()
                    else:
                        sample_ind = sample_mask.nonzero().view(-1)
                        it = seq[:, i-1].data.clone()
                        #prob_prev = torch.exp(outputs[-1].data.index_select(0, sample_ind)) # fetch prev distribution: shape Nx(M+1)
                        #it.index_copy_(0, sample_ind, torch.multinomial(prob_prev, 1).view(-1))
                        prob_prev = torch.exp(outputs[-1].data) # fetch prev distribution: shape Nx(M+1)
                        it.index_copy_(0, sample_ind, torch.multinomial(prob_prev, 1).view(-1).index_select(0, sample_ind))
                        it = Variable(it, requires_grad=False)
                else:
                    it = seq[:, i-1].clone()
                # break if all the sequences end
                if i >= 2 and seq[:, i-1].data.sum() == 0:
                    break
                xt = self.embed(it)

            output, state = self.core(xt, state)
            output = F.log_softmax(self.logit(output))
            outputs.append(output)

        return torch.cat([_.unsqueeze(1) for _ in outputs[1:]], 1).contiguous()
Author: littlebadRobot | Project: AI_challenger_Chinese_Caption | Lines: 34 | Source: FCModel.py


Example 4: sample

 def sample(self, sample_shape=torch.Size()):
     sample_shape = self._extended_shape(sample_shape)
     param_shape = sample_shape + torch.Size((self._num_events,))
     probs = self.probs.expand(param_shape)
     probs_2d = probs.contiguous().view(-1, self._num_events)
     sample_2d = torch.multinomial(probs_2d, 1, True)
     return sample_2d.contiguous().view(sample_shape)
Author: bhuWenDongchao | Project: pytorch | Lines: 7 | Source: categorical.py
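
torch.multinomial only accepts 1-D or 2-D probability tensors, which is why Example 4 flattens all batch dimensions into the first axis before sampling and restores the shape afterwards. A minimal standalone sketch of the same pattern (tensor shapes chosen arbitrarily for illustration):

import torch

probs = torch.rand(2, 3, 5)                  # a (2, 3) batch of 5-way distributions
probs = probs / probs.sum(-1, keepdim=True)  # normalize each row
flat = probs.view(-1, 5)                     # flatten batch dims: (6, 5)
samples = torch.multinomial(flat, 1, replacement=True)
print(samples.view(2, 3))                    # restore the batch shape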


Example 5: predict_fn

def predict_fn(input_data, model):
    logger.info('Generating text based on input parameters.')
    corpus = model['corpus']
    model = model['model']

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    logger.info('Current device: {}'.format(device))
    torch.manual_seed(input_data['seed'])
    ntokens = len(corpus.dictionary)
    input = torch.randint(ntokens, (1, 1), dtype=torch.long).to(device)
    hidden = model.init_hidden(1)

    logger.info('Generating {} words.'.format(input_data['words']))
    result = []
    with torch.no_grad():  # no tracking history
        for i in range(input_data['words']):
            output, hidden = model(input, hidden)
            word_weights = output.squeeze().div(input_data['temperature']).exp().cpu()
            word_idx = torch.multinomial(word_weights, 1)[0]
            input.fill_(word_idx)
            word = corpus.dictionary.idx2word[word_idx]
            word = word if type(word) == str else word.decode()
            if word == '<eos>':
                word = '\n'
            elif i % 12 == 11:
                word = word + '\n'
            else:
                word = word + ' '
            result.append(word)
    return ''.join(result)
Author: FNDaily | Project: amazon-sagemaker-examples | Lines: 30 | Source: generate.py
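
The word_weights = output.squeeze().div(input_data['temperature']).exp() line in Example 5 is temperature sampling: it rescales the log-probabilities before exponentiating, and since torch.multinomial normalizes non-negative weights internally, no explicit softmax is needed. A small sketch of the effect (values chosen only for illustration):

import torch

logits = torch.tensor([1.0, 2.0, 3.0])
for T in (0.5, 1.0, 2.0):
    weights = logits.div(T).exp()   # lower T -> sharper distribution
    draws = torch.multinomial(weights, 1000, replacement=True)
    print(T, torch.bincount(draws, minlength=3).tolist())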


Example 6: translate

def translate(enc_input='thisissungkim.iloveyou.', predict_len=100, temperature=0.9):
    input_var = str2tensor(enc_input)
    encoder_hidden = encoder.init_hidden()
    encoder_outputs, encoder_hidden = encoder(input_var, encoder_hidden)

    hidden = encoder_hidden

    predicted = ''
    dec_input = str2tensor(SOS_token)
    for c in range(predict_len):
        output, hidden = decoder(dec_input, hidden)

        # Sample from the network output as a multinomial distribution
        output_dist = output.data.view(-1).div(temperature).exp()
        top_i = torch.multinomial(output_dist, 1)[0]

        # Stop at EOS ('==' rather than 'is'; identity comparison of ints is unreliable)
        if top_i == EOS_token:
            break

        predicted_char = chr(top_i)
        predicted += predicted_char

        dec_input = str2tensor(predicted_char)

    return enc_input, predicted
Author: jiayouwyhit | Project: PyTorchZeroToAll | Lines: 26 | Source: 14_1_seq2seq.py


Example 7: sample

 def sample(self, input, temperature=1., hidden=None):
     hidden = self.module_.init_hidden(1) if hidden is None else hidden
     output, hidden = self.module_(input, hidden)
     probas = output.squeeze().data.div(temperature).exp()
     sample = torch.multinomial(probas, 1)[-1]
     if probas.dim() > 1:
         sample = sample[0]
     return sample, self.repackage_hidden(hidden)
Author: YangHaha11514 | Project: skorch | Lines: 8 | Source: net.py


Example 8: torch_multinomial

def torch_multinomial(input, num_samples, replacement=False):
    """
    Like `torch.multinomial()` but works with cuda tensors.
    Does not support keyword argument `out`.
    """
    if input.is_cuda:
        return torch_multinomial(input.cpu(), num_samples, replacement).cuda()
    else:
        return torch.multinomial(input, num_samples, replacement)
Author: Magica-Chen | Project: pyro | Lines: 9 | Source: util.py
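
This wrapper predates native GPU support; in current PyTorch releases torch.multinomial runs directly on CUDA tensors, so the CPU round-trip is only needed on old versions. A usage sketch, assuming the torch_multinomial helper above is in scope:

import torch

probs = torch.tensor([0.1, 0.2, 0.7])
if torch.cuda.is_available():
    probs = probs.cuda()   # the helper moves this to CPU, samples, and moves back

idx = torch_multinomial(probs, 5, replacement=True)
print(idx)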


Example 9: sample

    def sample(self, fc_feats, att_feats, opt={}):
        sample_max = opt.get('sample_max', 1)
        beam_size = opt.get('beam_size', 1)
        temperature = opt.get('temperature', 1.0)
        if beam_size > 1:
            return self.sample_beam(fc_feats, att_feats, opt)

        batch_size = fc_feats.size(0)
        state = self.init_hidden(batch_size)

        # embed fc and att feats
        fc_feats = self.fc_embed(fc_feats)
        _att_feats = self.att_embed(att_feats.view(-1, self.att_feat_size))
        att_feats = _att_feats.view(*(att_feats.size()[:-1] + (self.rnn_size,)))

        # Project the attention feats first to reduce memory and computation consumption.
        p_att_feats = self.ctx2att(att_feats.view(-1, self.rnn_size))
        p_att_feats = p_att_feats.view(*(att_feats.size()[:-1] + (self.att_hid_size,)))

        seq = []
        seqLogprobs = []
        for t in range(self.seq_length + 1):
            if t == 0: # input <bos>
                it = fc_feats.data.new(batch_size).long().zero_()
            elif sample_max:
                sampleLogprobs, it = torch.max(logprobs.data, 1)
                it = it.view(-1).long()
            else:
                if temperature == 1.0:
                    prob_prev = torch.exp(logprobs.data).cpu() # fetch prev distribution: shape Nx(M+1)
                else:
                    # scale logprobs by temperature
                    prob_prev = torch.exp(torch.div(logprobs.data, temperature)).cpu()
                it = torch.multinomial(prob_prev, 1).cuda()
                sampleLogprobs = logprobs.gather(1, Variable(it, requires_grad=False)) # gather the logprobs at sampled positions
                it = it.view(-1).long() # and flatten indices for downstream processing

            xt = self.embed(Variable(it, requires_grad=False))

            if t >= 1:
                # stop when all finished
                if t == 1:
                    unfinished = it > 0
                else:
                    unfinished = unfinished * (it > 0)
                if unfinished.sum() == 0:
                    break
                it = it * unfinished.type_as(it)
                seq.append(it) #seq[t] the input of t+2 time step

                seqLogprobs.append(sampleLogprobs.view(-1))

            output, state = self.core(xt, fc_feats, att_feats, p_att_feats, state)
            logprobs = F.log_softmax(self.logit(output))

        return torch.cat([_.unsqueeze(1) for _ in seq], 1), torch.cat([_.unsqueeze(1) for _ in seqLogprobs], 1)
Author: littlebadRobot | Project: AI_challenger_Chinese_Caption | Lines: 56 | Source: AttModel.py


Example 10: forward

    def forward(self, x, hiddens):
        batchsize = x["s"].size(0)
        if not hasattr(self, "prob"):
            self.prob = x["res"].clone().resize_(2)
            self.prob[0] = 1 - self.args.ratio_skip_observation
            self.prob[1] = self.args.ratio_skip_observation

        skip_mat = self._var(torch.multinomial(self.prob, batchsize, replacement=True).float().view(-1, 1))

        output = self._merge(x, hiddens, skip_mat)
        return self.decision(output)
Author: GenjiWu | Project: ELF | Lines: 11 | Source: model_lstm.py


Example 11: sample_K

def sample_K(probs, K, mode='test'):
    probs = 1e-6 + probs*(1 - 2e-6) # to avoid log(0)
    probs = probs.view(-1, 2**K)
    if mode == 'train':
        bin_sample = torch.multinomial(probs, 1).detach()
    else:
        bin_sample = probs.max(1)[1].detach().unsqueeze(1)
    sample = bin_sample.clone().type(dtype)
    log_probs_samples = torch.log(probs).gather(1, bin_sample).squeeze()
    log_probs_samples = log_probs_samples.view(batch_size, N).sum(1)
    return bin_sample.data.view(batch_size, N), log_probs_samples
Author: ParsonsZeng | Project: DiCoNet | Lines: 11 | Source: kmeans.py
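
Example 11 pairs each drawn sample with its log-probability, which is exactly what a REINFORCE-style (score-function) gradient estimator needs. A minimal standalone sketch of that pattern:

import torch

probs = torch.softmax(torch.randn(8, 4), dim=-1)       # 8 four-way distributions
sample = torch.multinomial(probs, 1)                    # (8, 1) sampled bins
log_p = torch.log(probs).gather(1, sample).squeeze(1)   # log-prob of each sample
# a hypothetical REINFORCE loss would then be: -(reward * log_p).mean()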


Example 12: sample

 def sample(self, Q):
     if self.use_actor_critic:
         pi = F.softmax(Q, dim=-1)
         a = torch.multinomial(pi, 1).squeeze()
         return a.data.cpu().numpy()
     else:
         sample = random.random()
         if sample > self.eps_threshold:
             return Q.data.max(1)[1].cpu().numpy()
         else:
             return np.random.randint(0, self.num_actions, self.nenv)
Author: tony32769 | Project: treeqn | Lines: 11 | Source: models.py


Example 13: main

def main():
    path='my_data1'
    sec_text="I wanna go out tonight"
    n_bins=4

    # load the data (my_data1 by default)
    corpus = data.Corpus(path=os.path.join("data", path))
    gen=torch.load("models/gen_"+path+".pt").cuda()
    print(gen)

    bin_stream=string2bins(sec_text,n_bins)

    ntokens = len(corpus.dictionary)
    tokens = list(range(ntokens)) # * args.replication_factor
    np.random.shuffle(tokens)
    words_in_bin = int(len(tokens) /n_bins)
    bins = [tokens[i:i + words_in_bin] for i in range(0, len(tokens), words_in_bin)]
    zero = [list(set(tokens) - set(bin_)) for bin_ in bins]



    # generate each word in a loop

    for _ in range(10):

        input = Variable(torch.Tensor([corpus.dictionary.word2idx['<start>']]), volatile=True).view(-1,1).type(torch.LongTensor).cuda()
        h=gen.init_hidden(1)
        gen_words=[]
        for i in range(len(bin_stream[:16])):
            output,h=gen(input,h)

            zero_index = zero[int(bin_stream[i],2)]
            zero_index = torch.LongTensor(zero_index).cuda()

            output = output.squeeze().data.div(0.8).exp()
            output.index_fill_(0, zero_index, 0)

            word_idx = torch.multinomial(output, 1)[0]
            gen_words.append(word_idx)
            input.data.fill_(word_idx)

        print(len(gen_words))
        str_=" ".join([corpus.dictionary.idx2word[x] for x in gen_words])
        print(str_)
Author: minizhao | Project: GAN-for-text | Lines: 44 | Source: gen_steg.py


Example 14: sample

    def sample(self, max_time_step=200):
        """generate one sample"""
        sample_words = [self.vocab['<s>']]
        h_tm1 = None
        for t in range(max_time_step):
            x_tm1_embed = self.embed(Variable(torch.LongTensor([sample_words[-1]])))
            x_tm1_embed = x_tm1_embed.unsqueeze(0)
            h_t, (last_state, last_cell) = self.lstm(x_tm1_embed, h_tm1)
            h_t = self.dropout(h_t.view(-1))
            p_t = F.softmax(self.read_out(h_t), dim=-1)
            x_t_wid = torch.multinomial(p_t, 1).data[0]  # num_samples is a required argument
            x_t = self.vocab.id2word[x_t_wid]

            if x_t == '</s>':
                return [self.vocab.id2word[wid] for wid in sample_words[1:]]
            else:
                sample_words.append(x_t_wid)

            h_tm1 = last_state, last_cell
Author: chubbymaggie | Project: tranX | Lines: 19 | Source: neural_lm.py


Example 15: sample_from_model

def sample_from_model(model, vectorizer, nationalities, sample_size=20,
                      temperature=1.0):
    num_samples = len(nationalities)
    begin_seq_index = [vectorizer.char_vocab.begin_seq_index
                       for _ in range(num_samples)]
    begin_seq_index = torch.tensor(begin_seq_index, dtype=torch.int64).unsqueeze(dim=1)
    indices = [begin_seq_index]
    nationality_indices = torch.tensor(nationalities, dtype=torch.int64).unsqueeze(dim=0)
    h_t = model.nation_emb(nationality_indices)

    for time_step in range(sample_size):
        x_t = indices[time_step]
        x_emb_t = model.char_emb(x_t)
        rnn_out_t, h_t = model.rnn(x_emb_t, h_t)
        prediction_vector = model.fc(rnn_out_t.squeeze(dim=1))
        probability_vector = F.softmax(prediction_vector / temperature, dim=1)
        indices.append(torch.multinomial(probability_vector, num_samples=1))
    indices = torch.stack(indices).squeeze().permute(1, 0)
    return indices
Author: HadXu | Project: machine-learning | Lines: 19 | Source: conditional_example.py


Example 16: forward

    def forward(self, fc_feats, att_feats, seq):
        batch_size = fc_feats.size(0)
        state = self.init_hidden(batch_size)

        outputs = []

        # embed fc and att feats
        fc_feats = self.fc_embed(fc_feats)
        _att_feats = self.att_embed(att_feats.view(-1, self.att_feat_size))
        att_feats = _att_feats.view(*(att_feats.size()[:-1] + (self.rnn_size,)))

        # Project the attention feats first to reduce memory and computation consumption.
        p_att_feats = self.ctx2att(att_feats.view(-1, self.rnn_size))
        p_att_feats = p_att_feats.view(*(att_feats.size()[:-1] + (self.att_hid_size,)))

        for i in range(seq.size(1) - 1):
            if self.training and i >= 1 and self.ss_prob > 0.0: # otherwise no need to sample
                sample_prob = fc_feats.data.new(batch_size).uniform_(0, 1)
                sample_mask = sample_prob < self.ss_prob
                if sample_mask.sum() == 0:
                    it = seq[:, i].clone()
                else:
                    sample_ind = sample_mask.nonzero().view(-1)
                    it = seq[:, i].data.clone()
                    #prob_prev = torch.exp(outputs[-1].data.index_select(0, sample_ind)) # fetch prev distribution: shape Nx(M+1)
                    #it.index_copy_(0, sample_ind, torch.multinomial(prob_prev, 1).view(-1))
                    prob_prev = torch.exp(outputs[-1].data) # fetch prev distribution: shape Nx(M+1)
                    it.index_copy_(0, sample_ind, torch.multinomial(prob_prev, 1).view(-1).index_select(0, sample_ind))
                    it = Variable(it, requires_grad=False)
            else:
                it = seq[:, i].clone()          
            # break if all the sequences end
            if i >= 1 and seq[:, i].data.sum() == 0:
                break

            xt = self.embed(it)

            output, state = self.core(xt, fc_feats, att_feats, p_att_feats, state)
            output = F.log_softmax(self.logit(output))
            outputs.append(output)

        return torch.cat([_.unsqueeze(1) for _ in outputs], 1)
Author: littlebadRobot | Project: AI_challenger_Chinese_Caption | Lines: 42 | Source: AttModel.py


Example 17: sample

    def sample(self, netW, input, state, opt={}):
        sample_max = opt.get('sample_max', 1)
        beam_size = opt.get('beam_size', 1)
        temperature = opt.get('temperature', 1.0)
        seq_length = opt.get('seq_length', 9)
        self.seq_length = seq_length

        if beam_size > 1:
            return self.sample_beam(netW, input, state, opt)

        batch_size = input.size(1)
        seq = []
        seqLogprobs = []
        for t in range(self.seq_length + 1):
            if t == 0: # input <bos>
                it = input.data
            elif sample_max:
                sampleLogprobs, it = torch.max(logprobs.data, 1)
                it = it.view(-1).long()
            else:
                if temperature == 1.0:
                    prob_prev = torch.exp(logprobs.data).cpu() # fetch prev distribution: shape Nx(M+1)
                else:
                    # scale logprobs by temperature
                    prob_prev = torch.exp(torch.div(logprobs.data, temperature)).cpu()
                it = torch.multinomial(prob_prev, 1).cuda()
                sampleLogprobs = logprobs.gather(1, Variable(it, requires_grad=False)) # gather the logprobs at sampled positions
                it = it.view(-1).long() # and flatten indices for downstream processing

            xt = netW(Variable(it.view(1,-1), requires_grad=False))

            if t >= 1:
                seq.append(it) #seq[t] the input of t+2 time step
                seqLogprobs.append(sampleLogprobs.view(-1))

            output, state = self.rnn(xt, state)

            output = F.dropout(output, self.d, training=self.training)
            decoded = self.decoder(output.view(output.size(0)*output.size(1), output.size(2)))
            logprobs = F.log_softmax(self.beta * decoded)

        return torch.cat([_.unsqueeze(1) for _ in seq], 1), torch.cat([_.unsqueeze(1) for _ in seqLogprobs], 1)
Author: AashishV | Project: visDial.pytorch | Lines: 42 | Source: netG.py


Example 18: initialize

def initialize(data):
    pyro.clear_param_store()

    optim = pyro.optim.Adam({'lr': 0.1, 'betas': [0.8, 0.99]})
    elbo = TraceEnum_ELBO(max_iarange_nesting=1)
    svi = SVI(model, full_guide, optim, loss=elbo)

    # Initialize weights to uniform.
    pyro.param('auto_weights', 0.5 * torch.ones(K), constraint=constraints.simplex)

    # Assume half of the data variance is due to intra-component noise.
    var = (data.var() / 2).sqrt()
    pyro.param('auto_scale', torch.tensor([var]*4), constraint=constraints.positive)

    # Initialize means from a subsample of data.
    pyro.param('auto_locs', data[torch.multinomial(torch.ones(len(data)) / len(data), K)])

    loss = svi.loss(model, full_guide, data)

    return loss, svi
Author: mcdickenson | Project: em-gaussian | Lines: 20 | Source: em-gaussian-pyro.py
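
The torch.multinomial(torch.ones(len(data)) / len(data), K) call in Example 18 draws K distinct indices uniformly at random (multinomial samples without replacement by default), which is equivalent in distribution to slicing a random permutation. A small sketch:

import torch

data = torch.randn(100)
K = 4

idx_a = torch.multinomial(torch.ones(len(data)) / len(data), K)  # uniform, no replacement
idx_b = torch.randperm(len(data))[:K]                            # same distribution
print(data[idx_a], data[idx_b])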


Example 19: __init__

    def __init__(self, input_dim, hidden_dim, output_dim_multiplier=1,
                 mask_encoding=None, permutation=None):
        super(AutoRegressiveNN, self).__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.output_dim_multiplier = output_dim_multiplier

        if mask_encoding is None:
            # the dependency structure is chosen at random
            self.mask_encoding = 1 + torch.multinomial(torch.ones(input_dim - 1) / (input_dim - 1),
                                                       num_samples=hidden_dim, replacement=True)
        else:
            # the dependency structure is given by the user
            self.mask_encoding = mask_encoding

        if permutation is None:
            # a permutation is chosen at random
            self.permutation = torch.randperm(input_dim, device=torch.device('cpu'))
        else:
            # the permutation is chosen by the user
            self.permutation = permutation

        # these masks control the autoregressive structure
        self.mask1 = torch.zeros(hidden_dim, input_dim)
        self.mask2 = torch.zeros(input_dim * self.output_dim_multiplier, hidden_dim)

        for k in range(hidden_dim):
            # fill in mask1
            m_k = self.mask_encoding[k].item()
            slice_k = torch.cat([torch.ones(m_k), torch.zeros(input_dim - m_k)])
            for j in range(input_dim):
                self.mask1[k, self.permutation[j]] = slice_k[j]
            # fill in mask2
            slice_k = torch.cat([torch.zeros(m_k), torch.ones(input_dim - m_k)])
            for r in range(self.output_dim_multiplier):
                for j in range(input_dim):
                    self.mask2[r * input_dim + self.permutation[j], k] = slice_k[j]

        self.lin1 = MaskedLinear(input_dim, hidden_dim, self.mask1)
        self.lin2 = MaskedLinear(hidden_dim, input_dim * output_dim_multiplier, self.mask2)
        self.relu = nn.ReLU()
Author: lewisKit | Project: pyro | Lines: 41 | Source: auto_reg_nn.py


Example 20: forward

    def forward(self, fc_feats, att_feats, seq):
        batch_size = fc_feats.size(0)
        state = self.init_hidden(batch_size)
        #print ("batch size:",batch_size)  ## 50 (10 images 50 captions(sentence))
        #print ("seq size:",seq.size())  ##  (50L, 18L)
        #print("seq : ",seq[29].data)  ## seq bug, data loader 
        outputs = []

        for i in range(seq.size(1)):
            if i == 0:
                xt = self.img_embed(fc_feats)
            else:
                if self.training and i >= 2 and self.ss_prob > 0.0: # otherwise no need to sample
                    sample_prob = fc_feats.data.new(batch_size).uniform_(0, 1)
                    sample_mask = sample_prob < self.ss_prob
                    if sample_mask.sum() == 0:
                        it = seq[:, i-1].clone()
                    else:
                        sample_ind = sample_mask.nonzero().view(-1)
                        it = seq[:, i-1].data.clone()
                        #prob_prev = torch.exp(outputs[-1].data.index_select(0, sample_ind)) # fetch prev distribution: shape Nx(M+1)
                        #it.index_copy_(0, sample_ind, torch.multinomial(prob_prev, 1).view(-1))
                        prob_prev = torch.exp(outputs[-1].data) # fetch prev distribution: shape Nx(M+1)
                        it.index_copy_(0, sample_ind, torch.multinomial(prob_prev, 1).view(-1).index_select(0, sample_ind))
                        it = Variable(it, requires_grad=False)
                else:
                    it = seq[:, i-1].clone()                
                # break if all the sequences end
                if i >= 2 and seq[:, i-1].data.sum() == 0:
                    break
                xt = self.embed(it)
                
            output, state = self.core(xt.unsqueeze(0), state) ## call lstm
            output = F.log_softmax(self.logit(self.dropout(output.squeeze(0))))
            outputs.append(output)
        #print ("output: length",len(outputs))  # length 18
        t = torch.cat([_.unsqueeze(1) for _ in outputs[1:]], 1).contiguous() # 1-index

        #print("output size:",t.size())
        #exit()
        return t  #(50L, 17L, 9488L)
Author: littlebadRobot | Project: AI_challenger_Chinese_Caption | Lines: 41 | Source: ShowTellModel.py



Note: the torch.multinomial function examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright belongs to the original authors, and distribution or use should follow each project's License. Please do not reproduce without permission.

