
Python torch.argmax Function Code Examples


This article collects typical usage examples of the Python torch.argmax function. If you are asking yourself how torch.argmax works, what it is used for, or what calling it looks like in practice, the hand-picked code examples below should help.

The following 20 code examples of the argmax function are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
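
As a quick reference before the examples: torch.argmax returns the index of the maximum value of a tensor, over the flattened tensor by default, or along a given dim. A minimal sketch:

import torch

x = torch.tensor([[0.1, 2.5, 0.3],
                  [4.0, 0.2, 0.7]])

print(torch.argmax(x))         # tensor(3): index into the flattened tensor
print(torch.argmax(x, dim=0))  # tensor([1, 0, 1]): row index of each column's max
print(torch.argmax(x, dim=1))  # tensor([1, 0]): column index of each row's max

In classification code, torch.argmax(logits, dim=1) is the usual way to turn per-class scores into predicted class indices, as most of the examples below do.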

Example 1: train

import torch
import torch.nn.functional as F

def train(model, trainLoader, criterion, optimizer, evalData=None,
          epoch=1, echoStep=100, evalStep=1000, saveStep=5000, savePath="./"):

    if evalData is not None:
        evalX, evalY = evalData
        if torch.cuda.is_available():
            evalY = evalY.cuda()
            if isinstance(evalX, list):
                evalX = [t.cuda() for t in evalX]
            else:
                evalX = evalX.cuda()

    batchLen = len(trainLoader)
    for epochIdx in range(epoch):
        for i, batch in enumerate(trainLoader, batchLen * epochIdx + 1):
            x, y = batch
            if torch.cuda.is_available():
                y = y.cuda()
                if isinstance(x, list):
                    x = [t.cuda() for t in x]
                else:
                    x = x.cuda()
            out = model(x)
            loss = criterion(out, y)

            # batch accuracy from the argmax over the class logits
            pred = torch.argmax(out, dim=1)
            correct = pred.eq(y).sum()
            acc = float(correct) / len(y)

            # print loss
            if i % echoStep == 0:
                print("Step %d/%d/%d : Loss %.4f , Acc %.4f" % (i, batchLen * epoch, epochIdx + 1, float(loss), acc))
            # evaluate
            if i % evalStep == 0 and evalData is not None:
                evalOut = model(evalX)
                evalLoss = criterion(evalOut, evalY)
                correct = torch.argmax(F.softmax(evalOut, 1), dim=1).eq(evalY).sum()
                evalAcc = float(correct) / len(evalY)
                print("------------------------------------------------")
                print("Evaluate %d Sample : Loss %.4f , Acc %.4f" % (evalY.size(0), float(evalLoss), evalAcc))
                print()
            # save model
            if i % saveStep == 0:
                outFile = "%s/m_%d_%d.pt" % (savePath, i, epochIdx + 1)
                torch.save(model.state_dict(), outFile)
                print("Save model : %s" % outFile)

            # backward
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

    outFile = "%s/final.pt" % savePath
    torch.save(model.state_dict(), outFile)
    print("Save model : %s" % outFile)
Author: quanwei888 | Project: myspace | Lines: 58 | Source file: SimLSTMModel.py


Example 2: run

    def run(self):
        # Loss and Optimizer
        criterion = nn.CrossEntropyLoss()
        params = filter(lambda p: p.requires_grad, self.model.parameters())
        optimizer = self.opt.optimizer(params, lr=self.opt.learning_rate)

        max_test_acc = 0
        global_step = 0
        for epoch in range(self.opt.num_epoch):
            print('>' * 100)
            print('epoch: ', epoch)
            n_correct, n_total = 0, 0
            for i_batch, sample_batched in enumerate(self.train_data_loader):
                global_step += 1

                # switch model to training mode, clear gradient accumulators
                self.model.train()
                optimizer.zero_grad()

                inputs = [sample_batched[col].to(self.opt.device) for col in self.opt.inputs_cols]
                targets = sample_batched['polarity'].to(self.opt.device)
                outputs = self.model(inputs)

                loss = criterion(outputs, targets)
                loss.backward()
                optimizer.step()

                if global_step % self.opt.log_step == 0:
                    n_correct += (torch.argmax(outputs, -1) == targets).sum().item()
                    n_total += len(outputs)
                    train_acc = n_correct / n_total

                    # switch model to evaluation mode
                    self.model.eval()
                    n_test_correct, n_test_total = 0, 0
                    with torch.no_grad():
                        for t_batch, t_sample_batched in enumerate(self.test_data_loader):
                            t_inputs = [t_sample_batched[col].to(self.opt.device) for col in self.opt.inputs_cols]
                            t_targets = t_sample_batched['polarity'].to(self.opt.device)
                            t_outputs = self.model(t_inputs)

                            n_test_correct += (torch.argmax(t_outputs, -1) == t_targets).sum().item()
                            n_test_total += len(t_outputs)
                        test_acc = n_test_correct / n_test_total
                        if test_acc > max_test_acc:
                            max_test_acc = test_acc

                        print('loss: {:.4f}, acc: {:.4f}, test_acc: {:.4f}'.format(loss.item(), train_acc, test_acc))

                        # log
                        self.writer.add_scalar('loss', loss, global_step)
                        self.writer.add_scalar('acc', train_acc, global_step)
                        self.writer.add_scalar('test_acc', test_acc, global_step)

        self.writer.close()

        print('max_test_acc: {0}'.format(max_test_acc))
        return max_test_acc
Author: coder352 | Project: shellscript | Lines: 58 | Source file: train.py


Example 3: __init__

    def __init__(self, probs=None, logits=None, validate_args=None):
        if probs is not None:
            new_probs = torch.zeros_like(probs, dtype=torch.float)
            new_probs[torch.argmax(probs, dim=0)] = 1.0
            probs = new_probs
        elif logits is not None:
            new_logits = torch.full_like(logits, -1e8, dtype=torch.float)
            max_idx = torch.argmax(logits, dim=0)
            new_logits[max_idx] = logits[max_idx]
            logits = new_logits

        super(Argmax, self).__init__(probs=probs, logits=logits, validate_args=validate_args)
Author: ronald-xie | Project: SLM-Lab | Lines: 12 | Source file: policy_util.py
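
A usage sketch for the distribution above. It assumes, as the super() call suggests but the excerpt does not show, that Argmax subclasses torch.distributions.Categorical; under that assumption the constructor collapses all probability mass onto the most likely class, so sampling becomes deterministic:

import torch

# Hypothetical usage; assumes Argmax extends torch.distributions.Categorical.
probs = torch.tensor([0.1, 0.7, 0.2])
d = Argmax(probs=probs)   # probs are replaced by the one-hot [0., 1., 0.]
print(d.sample())         # always tensor(1)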


Example 4: _get_state_action_values

    def _get_state_action_values(self, transitions):
        batch_size = len(transitions)
        batch = Transition(*zip(*transitions))

        # bool masks replace the deprecated uint8 masks
        non_final_mask = torch.tensor(tuple(map(lambda s: s is not None, batch.next_state)), dtype=torch.bool, device=self.config.device)

        state_batch = torch.cat(batch.state).to(torch.float32)
        action_batch = torch.cat(batch.action)
        reward_batch = torch.cat(batch.reward).to(torch.float32)
        
        next_state_values = torch.zeros(batch_size).to(self.config.device, dtype=torch.float32)

        next_states = [s for s in batch.next_state if s is not None]
        if len(next_states) != 0:
            with torch.no_grad():
                non_final_next_state = torch.cat(next_states).to(torch.float32)

                Q = self._get_Q(self.model, non_final_next_state)
                best_actions = torch.argmax(Q, dim=1, keepdim=True)

                Q_target = self._get_Q(self.target_model, non_final_next_state)
                next_state_values[non_final_mask] = Q_target.gather(1, best_actions).squeeze()

        gamma = self.config.gamma ** self.config.num_multi_step_reward
        expected_values = reward_batch + gamma * next_state_values

        with torch.set_grad_enabled(self.model.training):
            Q = self._get_Q(self.model, state_batch)
            values = torch.squeeze(Q.gather(1, action_batch))
            values = values.to(self.config.device, dtype=torch.float32)  # .to() is not in-place; assign the result

        return (values, expected_values)
Author: y-kamiya | Project: machine-learning-samples | Lines: 32 | Source file: agent.py
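
The argmax/gather pair above is the Double DQN decoupling: the online network selects the greedy next action, while the target network evaluates it. A minimal self-contained sketch (shapes and values are made up for illustration):

import torch

Q_online = torch.tensor([[1.0, 3.0], [2.0, 0.5]])   # Q(s', .) from the online net
Q_target = torch.tensor([[0.9, 2.5], [1.8, 0.4]])   # Q(s', .) from the target net
reward = torch.tensor([1.0, 0.0])
gamma = 0.99

best_actions = torch.argmax(Q_online, dim=1, keepdim=True)   # tensor([[1], [0]])
next_values = Q_target.gather(1, best_actions).squeeze(1)    # tensor([2.5000, 1.8000])
target = reward + gamma * next_values                        # tensor([3.4750, 1.7820])
print(target)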


Example 5: forward_greedy

	def forward_greedy(self,z,num_steps,temperature,x=None):
		predictions = []
		batch_size = z.size(0)
		next_input = z.new_zeros(size=(batch_size,num_steps),dtype=torch.long,requires_grad=False)
		next_input[:,:] = self.PAD_TOKEN
		next_input[:,0] = self.SOS_TOKEN # <sos> token
		z = self.activation(self.z2h(z)).view(batch_size,1,-1).repeat(1,num_steps,1)
		for i in range(num_steps):
			input = next_input
			step_input = self.embedding(input)
			step_input = self.pos_embedding(step_input)
			step_input = torch.cat([step_input,z],dim=2) # step_input is of size [batch,seq_len,step_input_size]
			step_input = self.activation(self.s2h(step_input))
			non_pad_mask = get_non_pad_mask(input,self.PAD_TOKEN)
			slf_attn_mask_subseq = get_subsequent_mask(input)
			slf_attn_mask_keypad = get_attn_key_pad_mask(input,self.PAD_TOKEN)
			attn_mask = (slf_attn_mask_keypad + slf_attn_mask_subseq).gt(0)
			out = self.transformer(step_input,non_pad_mask=non_pad_mask,attn_mask=attn_mask)
			out = out[:,i,:]
			out = self.activation(out)
			out = self.h2o(out)
			out = self.last_activation(out,temperature)
			if x is not None: # teacher forcing
				previous_output = x[:,i]
			else: # use prediction as input
				previous_output = torch.argmax(out,dim=-1)
				previous_output = previous_output.detach()
			next_input = torch.cat([input[:,:i+1],previous_output.view(-1,1),input[:,i+2:]],dim=1).detach()
			predictions.append(out)
		output = torch.stack(predictions).transpose(1,0)
		return output
Author: mhattingpete | Project: GenerativeAdversarialNetworks | Lines: 31 | Source file: generators.py


Example 6: sample_relax

    def sample_relax(logits, surrogate):
        cat = Categorical(logits=logits)
        u = torch.rand(B,C).clamp(1e-10, 1.-1e-10).cuda()
        gumbels = -torch.log(-torch.log(u))
        z = logits + gumbels
        b = torch.argmax(z, dim=1) #.view(B,1)
        logprob = cat.log_prob(b).view(B,1)


        # evaluate the surrogate c(z); an earlier variant (left commented out in
        # the source) averaged c(z) over several independent z samples
        surr_input = torch.cat([z, x, logits.detach()], dim=1)
        cz = surrogate.net(surr_input)


        cz_tildes = []
        for j in range(1):
            z_tilde = sample_relax_given_b(logits, b)
            surr_input = torch.cat([z_tilde, x, logits.detach()], dim=1)
            cz_tilde = surrogate.net(surr_input)
            cz_tildes.append(cz_tilde)
        cz_tildes = torch.stack(cz_tildes)
        cz_tilde = torch.mean(cz_tildes, dim=0) #.view(B,1)

        return b, logprob, cz, cz_tilde
Author: chriscremer | Project: Other_Code | Lines: 31 | Source file: gmm_cleaned_v5.py


Example 7: viterbi

    def viterbi(self, emit, mask):
        T, B, N = emit.shape
        lens = mask.sum(dim=0)
        delta = torch.zeros(T, B, N)
        paths = torch.zeros(T, B, N, dtype=torch.long)

        delta[0] = self.strans + emit[0]  # [B, N]

        for i in range(1, T):
            trans_i = self.trans.unsqueeze(0)  # [1, N, N]
            emit_i = emit[i].unsqueeze(1)  # [B, 1, N]
            scores = trans_i + emit_i + delta[i - 1].unsqueeze(2)  # [B, N, N]
            delta[i], paths[i] = torch.max(scores, dim=1)

        predicts = []
        for i, length in enumerate(lens):
            prev = torch.argmax(delta[length - 1, i] + self.etrans)

            predict = [prev]
            for j in reversed(range(1, length)):
                prev = paths[j, i, prev]
                predict.append(prev)
            # reverse the predicted sequence and store it
            predicts.append(torch.tensor(predict).flip(0))

        return torch.cat(predicts)
Author: zysite | Project: post | Lines: 26 | Source file: crf.py


Example 8: test

def test(model):
    game_state = GameState()

    # initial action is do nothing
    action = torch.zeros([model.number_of_actions], dtype=torch.float32)
    action[0] = 1
    image_data, reward, terminal = game_state.frame_step(action)
    image_data = resize_and_bgr2gray(image_data)
    image_data = image_to_tensor(image_data)
    state = torch.cat((image_data, image_data, image_data, image_data)).unsqueeze(0)

    while True:
        # get output from the neural network
        output = model(state)[0]

        action = torch.zeros([model.number_of_actions], dtype=torch.float32)
        if torch.cuda.is_available():  # put on GPU if CUDA is available
            action = action.cuda()

        # get action
        action_index = torch.argmax(output)
        if torch.cuda.is_available():  # put on GPU if CUDA is available
            action_index = action_index.cuda()
        action[action_index] = 1

        # get next state
        image_data_1, reward, terminal = game_state.frame_step(action)
        image_data_1 = resize_and_bgr2gray(image_data_1)
        image_data_1 = image_to_tensor(image_data_1)
        state_1 = torch.cat((state.squeeze(0)[1:, :, :], image_data_1)).unsqueeze(0)

        # set state to be state_1
        state = state_1
Author: ZedZero | Project: dqn_flappy_bird | Lines: 33 | Source file: dqn.py


Example 9: get_pm_loss

    def get_pm_loss(self, image,
                        alpha = 0.0,
                        topk = 0,
                        use_baseline = True,
                        use_term_one_baseline = True,
                        n_samples = 1):

        class_weights = self.pixel_attention(image)
        log_q = torch.log(class_weights)

        # kl term
        kl_pixel_probs = (class_weights * log_q).sum()

        f_pixel = lambda i : self.get_loss_cond_pixel_1d(image, i) + \
                    kl_pixel_probs

        avg_pm_loss = 0.0
        # TODO: n_samples would be more elegant as an
        # argument to get_partial_marginal_loss
        for k in range(n_samples):
            pm_loss = pm_lib.get_partial_marginal_loss(f_pixel, log_q, alpha, topk,
                                        use_baseline = use_baseline,
                                        use_term_one_baseline = use_term_one_baseline)

            avg_pm_loss += pm_loss / n_samples

        map_locations = torch.argmax(log_q.detach(), dim = 1)
        map_cond_losses = f_pixel(map_locations).mean()

        return avg_pm_loss, map_cond_losses
Author: Runjing-Liu120 | Project: discrete_vae_experimentation | Lines: 30 | Source file: mnist_vae_lib.py


Example 10: get_pm_loss

    def get_pm_loss(self, image, image_so_far, var_so_far,
                            alpha = 0.0,
                            topk = 0,
                            use_baseline = True,
                            use_term_one_baseline = True,
                            n_samples = 1):

        resid_image = image - image_so_far
        class_weights = self.get_pixel_probs(resid_image, var_so_far)
        log_q = torch.log(class_weights)

        # kl term
        kl_a = (class_weights * log_q).sum()

        f_z = lambda i : self.get_loss_conditional_a(resid_image, image_so_far, var_so_far, i)[0] + kl_a

        avg_pm_loss = 0.0
        # TODO: n_samples would be more elegant as an
        # argument to get_partial_marginal_loss
        for k in range(n_samples):
            pm_loss = pm_lib.get_partial_marginal_loss(f_z, log_q, alpha, topk,
                                        use_baseline = use_baseline,
                                        use_term_one_baseline = use_term_one_baseline)
            avg_pm_loss += pm_loss / n_samples

        map_locations = torch.argmax(log_q.detach(), dim = 1)
        map_cond_losses = f_z(map_locations).mean()

        return avg_pm_loss, map_cond_losses
Author: Runjing-Liu120 | Project: discrete_vae_experimentation | Lines: 29 | Source file: galaxy_experiments_lib.py


Example 11: sample_relax

    def sample_relax(probs):
        # sample z via the Gumbel-max trick
        u = torch.rand(B, C)
        gumbels = -torch.log(-torch.log(u))
        z = torch.log(probs) + gumbels

        b = torch.argmax(z, dim=1)
        # the excerpt uses `cat` without defining it; a Categorical over the
        # same probs is the natural reading
        cat = Categorical(probs=probs)
        logprob = cat.log_prob(b)

        # sample z_tilde conditioned on the sampled category b
        u_b = torch.rand(B, 1)
        z_tilde_b = -torch.log(-torch.log(u_b))
        u = torch.rand(B, C)
        z_tilde = -torch.log((-torch.log(u) / probs) - torch.log(u_b))
        z_tilde[:, b] = z_tilde_b

        return z, b, logprob, z_tilde
Author: chriscremer | Project: Other_Code | Lines: 30 | Source file: is_pz_grad_dependent_on_theta_2.py


Example 12: predict

def predict(model, data, device):
    model.eval()
    with torch.no_grad():
        data = data.to(device)
        output = model(data)
        pred = torch.argmax(output.data, 1)
    return output, pred
Author: jfnavarro | Project: st_analysis | Lines: 7 | Source file: supervised_torch.py


Example 13: train

def train(model, trn_loader, optimizer, loss_func, device):
    model.train()
    training_loss = 0
    training_acc = 0
    counter = 0
    for data, target in trn_loader:
        # Variable wrappers are unnecessary in modern PyTorch
        data = data.to(device)
        target = target.to(device)
        # Forward pass
        output = model(data)
        tloss = loss_func(output, target)
        training_loss += tloss.item()
        # Zero the gradients
        optimizer.zero_grad()
        # Backward pass
        tloss.backward()
        # Update parameters
        optimizer.step()
        # Compute prediction's score
        pred = torch.argmax(output.data, 1)
        training_acc += accuracy_score(target.data.cpu().numpy(),
                                       pred.data.cpu().numpy())
        counter += 1
    avg_loss = training_loss / float(counter)
    avg_acc = training_acc / float(counter)
    return avg_loss, avg_acc
Author: jfnavarro | Project: st_analysis | Lines: 26 | Source file: supervised_torch.py


Example 14: sample_relax

def sample_relax(logits):
    # sample z via the Gumbel-max trick
    u = torch.rand(B, C).clamp(1e-12, 1. - 1e-12)
    gumbels = -torch.log(-torch.log(u))
    z = logits + gumbels
    b = torch.argmax(z, dim=1)

    cat = Categorical(logits=logits)
    logprob = cat.log_prob(b).view(B, 1)

    # sample z_tilde conditioned on b, drawing fresh noise for the chosen
    # coordinate (gathering v_k/z_tilde_b from u/z instead seemed biased in
    # practice, even though it shouldn't be)
    v_k = torch.rand(B, 1).clamp(1e-12, 1. - 1e-12)
    z_tilde_b = -torch.log(-torch.log(v_k))

    v = torch.rand(B, C).clamp(1e-12, 1. - 1e-12)
    probs = torch.softmax(logits, dim=1).repeat(B, 1)
    z_tilde = -torch.log((-torch.log(v) / probs) - torch.log(v_k))
    z_tilde.scatter_(dim=1, index=b.view(B, 1), src=z_tilde_b)

    return z, b, logprob, z_tilde
Author: chriscremer | Project: Other_Code | Lines: 34 | Source file: plotting_cat_grads_dist_4.py
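
Examples 6, 11, and 14 all rely on the Gumbel-max trick: adding independent Gumbel(0, 1) noise to the logits and taking the argmax yields an exact sample from the corresponding categorical distribution. A minimal sketch that checks this empirically (names and constants are illustrative):

import torch

torch.manual_seed(0)
logits = torch.tensor([1.0, 0.0, -1.0])
n = 100000

# Gumbel(0, 1) noise via inverse transform: g = -log(-log(u)), u ~ Uniform(0, 1)
u = torch.rand(n, 3).clamp(1e-12, 1. - 1e-12)
gumbels = -torch.log(-torch.log(u))
samples = torch.argmax(logits + gumbels, dim=1)

empirical = torch.bincount(samples, minlength=3).float() / n
print(empirical)                      # close to the softmax probabilities below
print(torch.softmax(logits, dim=0))   # tensor([0.6652, 0.2447, 0.0900])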


Example 15: predict_loader

def predict_loader(data_loader):

    # compute predictions and apply softmax
    predictions = model.predict(data_loader, apply_softmax=True)

    # print the predicted class and its softmax probability for each prediction
    for pred in predictions:
        print("Prediction: {} , Confidence: {}".format(torch.argmax(pred), torch.max(pred)))
Author: mamonraab | Project: TorchFusion | Lines: 8 | Source file: mnist-advanced.py


Example 16: predict

def predict(model, batches, weights_matrix):
    model.embedding, _ = create_emb_layer(weights_matrix)
    model.embedding = model.embedding.cuda()
    model.eval()
    # Only 1 batch and 1 item in that batch
    for batch in batches:
        pred = model(batch)
        return torch.argmax(F.softmax(pred, dim=1), 1)
Author: bearcave9 | Project: Weekend-Projects | Lines: 8 | Source file: AOA_LSTM.py


Example 17: predict_image

def predict_image(image_path):
    img = Image.open(image_path)
    img = transformations(img)

    pred = model.predict(img,apply_softmax=True)

    # print the predicted class and its softmax probability
    print("Prediction: {} , Confidence: {}".format(torch.argmax(pred), torch.max(pred)))
Author: mamonraab | Project: TorchFusion | Lines: 8 | Source file: mnist-advanced.py


Example 18: test

    def test(model, batches, weights_matrix):
        model.embedding, _ = create_emb_layer(weights_matrix)
        model.embedding =  model.embedding.cuda()
        model.eval()
        total_acc = 0
        count = 0
        cm = torch.zeros(3,3)
        for batch in batches:
            batch = create_sorted_batch(batch)
            label = batch['sentiment']
            pred = model(batch)
            acc = accuracy(torch.argmax(F.softmax(pred, dim=1), 1).float(), label.float().cuda())
            # move predictions to CPU and use plain int labels for sklearn
            cm += torch.from_numpy(confusion_matrix(label, torch.argmax(pred, 1).cpu(),
                    labels=[0, 1, 2])).float()
            total_acc += acc.item()
            count += len(label)

        return total_acc/count, cm
Author: bearcave9 | Project: Weekend-Projects | Lines: 18 | Source file: AOA_LSTM.py


Example 19: predict

    def predict(self, X):
        """sklearn-style interface without creating a graph"""
        X = X.to(device=self.cf_a.device)
        if self.cf_a.task_type == "regression":
            with torch.no_grad():
                return self.forward(X)
        elif self.cf_a.task_type == "classification":
            with torch.no_grad():
                return torch.argmax(self.forward(X), 1)
Author: manuwhs | Project: Trapyng | Lines: 9 | Source file: GeneralVBModel.py


Example 20: eval_

    def eval_(model, batches, criterion):
        model.eval()
        total_loss = 0
        total_acc = 0
        count = 0
        cm = torch.zeros(3,3)
        for batch in batches:
            batch = create_sorted_batch(batch)
            label = batch['sentiment']
            pred = model(batch)
            loss = criterion(pred, label.cuda())
            acc = accuracy(torch.argmax(F.softmax(pred, dim=1), 1).float(), label.float().cuda())
            # move predictions to CPU and use plain int labels for sklearn
            cm += torch.from_numpy(confusion_matrix(label, torch.argmax(pred, 1).cpu(),
                    labels=[0, 1, 2])).float()
            total_loss += loss.item()
            total_acc += acc.item()
            count += len(label)

        return total_loss/len(batches), total_acc/count, cm
Author: bearcave9 | Project: Weekend-Projects | Lines: 19 | Source file: AOA_LSTM.py



Note: the torch.argmax examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their original authors; copyright remains with those authors, and use or redistribution should follow each project's license. Do not repost without permission.

