
Python torch.max Function Code Examples


This article collects typical usage examples of the torch.max function in Python. If you have been struggling with questions like: How exactly does torch.max work? How do I call it? What does real code that uses it look like? then the curated examples below may help.



The following presents 20 code examples of the max function, sorted by popularity by default. You can upvote any example you like or find useful; your feedback helps the system recommend better Python code examples.
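
Before diving into the examples, here is a minimal sketch of the three calling conventions of torch.max (the tensors below are illustrative, not taken from any of the projects):

import torch

x = torch.tensor([[1., 5., 3.],
                  [4., 2., 6.]])
y = torch.tensor([[2., 0., 9.],
                  [1., 7., 0.]])

print(torch.max(x))                    # tensor(6.) -- global maximum over all elements
values, indices = torch.max(x, dim=1)  # per-row max and its argmax
print(values, indices)                 # tensor([5., 6.]) tensor([1, 2])
print(torch.max(x, y))                 # element-wise maximum of two same-shaped tensors

Most of the examples below use the second form, whose (values, indices) pair yields the max score and the argmax index in one call.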

Example 1: script_viterbi
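
From the baseline tagger's scripted Viterbi decoder: torch.max(next_tag_var, 1) returns both the best scores and the argmax tag indices at each step, which drive the recursion and the backpointer trail.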

def script_viterbi(unary, trans, start_idx, end_idx):
    # type: (Tensor, Tensor, int, int) -> Tuple[Tensor, Tensor]
    backpointers = []
    alphas = torch.full((1, unary.size(1)), -1e4, dtype=unary.dtype, device=unary.device)
    alphas[0, start_idx] = 0

    for i in range(unary.size(0)):
        unary_t = unary[i, :]
        next_tag_var = alphas + trans
        viterbi, best_tag_ids = torch.max(next_tag_var, 1)
        backpointers.append(best_tag_ids)
        alphas = viterbi + unary_t
        alphas = alphas.unsqueeze(0)

    terminal_vars = alphas.squeeze(0) + trans[end_idx, :]
    path_score, best_tag_id = torch.max(terminal_vars, 0)

    best_path = [best_tag_id]
    # follow the backpointers from the last step back to the first
    for i in range(len(backpointers)):
        i = len(backpointers) - i - 1
        best_tag_id = backpointers[i][best_tag_id]
        best_path.append(best_tag_id)

    # reverse the collected path with an index loop (reversed() is avoided
    # for TorchScript compatibility)
    new_path = []
    for i in range(len(best_path)):
        i = len(best_path) - i - 1
        new_path.append(best_path[i])
    return torch.stack(new_path[1:]), path_score
Author: dpressel | Project: baseline | Lines: 28 | Source: tagger_decoders.py


Example 2: forward
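
A pure-Python ROI max-pooling forward pass: each output bin is filled by reducing a window of the feature map with chained torch.max calls, one per spatial axis.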

    def forward(self, features, rois):
        batch_size, num_channels, data_height, data_width = features.size()
        num_rois = rois.size()[0]
        outputs = Variable(torch.zeros(num_rois, num_channels, self.pooled_height, self.pooled_width)).cuda()

        for roi_ind, roi in enumerate(rois):
            batch_ind = int(roi[0].item())  # roi[0].data[0] no longer indexes a 0-dim tensor
            roi_start_w, roi_start_h, roi_end_w, roi_end_h = np.round(
                roi[1:].data.cpu().numpy() * self.spatial_scale).astype(int)
            roi_width = max(roi_end_w - roi_start_w + 1, 1)
            roi_height = max(roi_end_h - roi_start_h + 1, 1)
            bin_size_w = float(roi_width) / float(self.pooled_width)
            bin_size_h = float(roi_height) / float(self.pooled_height)

            for ph in range(self.pooled_height):
                hstart = int(np.floor(ph * bin_size_h))
                hend = int(np.ceil((ph + 1) * bin_size_h))
                hstart = min(data_height, max(0, hstart + roi_start_h))
                hend = min(data_height, max(0, hend + roi_start_h))
                for pw in range(self.pooled_width):
                    wstart = int(np.floor(pw * bin_size_w))
                    wend = int(np.ceil((pw + 1) * bin_size_w))
                    wstart = min(data_width, max(0, wstart + roi_start_w))
                    wend = min(data_width, max(0, wend + roi_start_w))

                    is_empty = (hend <= hstart) or (wend <= wstart)
                    if is_empty:
                        outputs[roi_ind, :, ph, pw] = 0
                    else:
                        data = features[batch_ind]
                        # reduce over height, then width; the original used dim=2,
                        # which relied on pre-0.2 PyTorch keeping the reduced dim
                        outputs[roi_ind, :, ph, pw] = torch.max(
                            torch.max(data[:, hstart:hend, wstart:wend], 1)[0], 1)[0].view(-1)

        return outputs
Author: mutual-ai | Project: faster-rcnn.pytorch | Lines: 34 | Source: roi_pool_py.py


Example 3: predictive_elbo
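
Here torch.max supplies the max-subtraction trick for a numerically stable log-mean-exp when aggregating the ELBO over multiple z samples (k) and weight samples (s).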

    def predictive_elbo(self, x, k, s):
        # No pW or qW

        self.B = x.size()[0] #batch size
        # self.k = k  #number of z samples aka particles P
        # self.s = s  #number of W samples

        elbo1s = []
        for i in range(s):

            Ws, logpW, logqW = self.sample_W()  #_ , [1], [1]

            mu, logvar = self.encode(x)  #[B,Z]
            z, logpz, logqz = self.sample_z(mu, logvar, k=k) #[P,B,Z], [P,B]

            x_hat = self.decode(Ws, z) #[P,B,X]
            logpx = log_bernoulli(x_hat, x)  #[P,B]

            elbo = logpx + logpz - logqz #[P,B]
            if k>1:
                max_ = torch.max(elbo, 0)[0] #[B]
                elbo = torch.log(torch.mean(torch.exp(elbo - max_), 0)) + max_ #[B]
            # elbo1 = elbo1 #+ (logpW - logqW)*.00000001 #[B], logp(x|W)p(w)/q(w)
            elbo1s.append(elbo)

        elbo1s = torch.stack(elbo1s) #[S,B]
        if s>1:
            max_ = torch.max(elbo1s, 0)[0] #[B]
            # log-mean-exp over the S weight samples; assign back so the final
            # mean uses the aggregated estimate (the original stored this in an
            # unused variable, elbo1)
            elbo1s = torch.log(torch.mean(torch.exp(elbo1s - max_), 0)) + max_ #[B]

        elbo = torch.mean(elbo1s) #[1]
        return elbo
Author: chriscremer | Project: Other_Code | Lines: 32 | Source: bvae_pytorch4_plot_true_posterior.py


Example 4: bbox_ious
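
Element-wise torch.max and torch.min between coordinate tensors give the union and intersection extents needed for batched box IoU.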

def bbox_ious(boxes1, boxes2, x1y1x2y2=True):
    if x1y1x2y2:
        mx = torch.min(boxes1[0], boxes2[0])
        Mx = torch.max(boxes1[2], boxes2[2])
        my = torch.min(boxes1[1], boxes2[1])
        My = torch.max(boxes1[3], boxes2[3])
        w1 = boxes1[2] - boxes1[0]
        h1 = boxes1[3] - boxes1[1]
        w2 = boxes2[2] - boxes2[0]
        h2 = boxes2[3] - boxes2[1]
    else:
        mx = torch.min(boxes1[0]-boxes1[2]/2.0, boxes2[0]-boxes2[2]/2.0)
        Mx = torch.max(boxes1[0]+boxes1[2]/2.0, boxes2[0]+boxes2[2]/2.0)
        my = torch.min(boxes1[1]-boxes1[3]/2.0, boxes2[1]-boxes2[3]/2.0)
        My = torch.max(boxes1[1]+boxes1[3]/2.0, boxes2[1]+boxes2[3]/2.0)
        w1 = boxes1[2]
        h1 = boxes1[3]
        w2 = boxes2[2]
        h2 = boxes2[3]
    uw = Mx - mx
    uh = My - my
    cw = w1 + w2 - uw
    ch = h1 + h2 - uh
    mask = ((cw <= 0) + (ch <= 0) > 0)
    area1 = w1 * h1
    area2 = w2 * h2
    carea = cw * ch
    carea[mask] = 0
    uarea = area1 + area2 - carea
    return carea/uarea
Author: dyz-zju | Project: MVision | Lines: 30 | Source: utils.py


Example 5: bbox_overlaps
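
Pairwise IoU between N boxes and K query boxes: torch.min/torch.max against transposed coordinates broadcast into an (N, K) intersection grid.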

def bbox_overlaps(boxes, query_boxes):
    """
    Parameters
    ----------
    boxes: (N, 4) ndarray or tensor or variable
    query_boxes: (K, 4) ndarray or tensor or variable
    Returns
    -------
    overlaps: (N, K) overlap between boxes and query_boxes
    """
    if isinstance(boxes, np.ndarray):
        boxes = torch.from_numpy(boxes)
        query_boxes = torch.from_numpy(query_boxes)
        out_fn = lambda x: x.numpy()  # If input is ndarray, turn the overlaps back to ndarray when return
    else:
        out_fn = lambda x: x

    box_areas = (boxes[:, 2] - boxes[:, 0] + 1) * \
            (boxes[:, 3] - boxes[:, 1] + 1)
    query_areas = (query_boxes[:, 2] - query_boxes[:, 0] + 1) * \
            (query_boxes[:, 3] - query_boxes[:, 1] + 1)

    iw = (torch.min(boxes[:, 2:3], query_boxes[:, 2:3].t()) - torch.max(
        boxes[:, 0:1], query_boxes[:, 0:1].t()) + 1).clamp(min=0)
    ih = (torch.min(boxes[:, 3:4], query_boxes[:, 3:4].t()) - torch.max(
        boxes[:, 1:2], query_boxes[:, 1:2].t()) + 1).clamp(min=0)
    ua = box_areas.view(-1, 1) + query_areas.view(1, -1) - iw * ih
    overlaps = iw * ih / ua
    return out_fn(overlaps)
Author: zvant | Project: pytorch-faster-rcnn | Lines: 29 | Source: bbox.py


Example 6: bbox_iou
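
IoU of two batches of corner-format boxes: torch.max and torch.min pick out the intersection rectangle, and torch.clamp floors its width and height at zero.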

def bbox_iou(box1, box2):
    """
    Returns the IoU of two bounding boxes 
    
    
    """
    #Get the coordinates of bounding boxes
    b1_x1, b1_y1, b1_x2, b1_y2 = box1[:,0], box1[:,1], box1[:,2], box1[:,3]
    b2_x1, b2_y1, b2_x2, b2_y2 = box2[:,0], box2[:,1], box2[:,2], box2[:,3]
    
    #get the corrdinates of the intersection rectangle
    inter_rect_x1 =  torch.max(b1_x1, b2_x1)
    inter_rect_y1 =  torch.max(b1_y1, b2_y1)
    inter_rect_x2 =  torch.min(b1_x2, b2_x2)
    inter_rect_y2 =  torch.min(b1_y2, b2_y2)
    
    #Intersection area
    inter_area = torch.clamp(inter_rect_x2 - inter_rect_x1 + 1, min=0) * torch.clamp(inter_rect_y2 - inter_rect_y1 + 1, min=0)

    #Union Area
    b1_area = (b1_x2 - b1_x1 + 1)*(b1_y2 - b1_y1 + 1)
    b2_area = (b2_x2 - b2_x1 + 1)*(b2_y2 - b2_y1 + 1)
    
    iou = inter_area / (b1_area + b2_area - inter_area)
    
    return iou
Author: Yasuharaaa | Project: YOLO_v3_tutorial_from_scratch | Lines: 26 | Source: util.py


Example 7: log_sum_exp
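
A numerically stable log-sum-exp: torch.max over the last axis (computed once plain and once with keepdim=True for broadcasting) anchors the exponentials.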

def log_sum_exp(x):
    """ numerically stable log_sum_exp implementation that prevents overflow """
    # TF ordering
    axis  = len(x.size()) - 1
    m, _  = torch.max(x, dim=axis)
    m2, _ = torch.max(x, dim=axis, keepdim=True)
    return m + torch.log(torch.sum(torch.exp(x - m2), dim=axis))
Author: insperatum | Project: vhe | Lines: 7 | Source: utils.py
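
For reference, PyTorch also ships this as the built-in torch.logsumexp; a minimal sanity check against the helper above (a sketch, assuming inputs reduce over their last axis as in the TF ordering):

import torch

x = torch.randn(4, 10)
assert torch.allclose(log_sum_exp(x), torch.logsumexp(x, dim=-1))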


Example 8: bbox_iou
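
A variant of the IoU helper from Example 6 that also accepts center-format boxes and adds 1e-16 to the denominator to guard against division by zero.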

def bbox_iou(box1, box2, x1y1x2y2=True):
    """
    Returns the IoU of two bounding boxes
    """
    if not x1y1x2y2:
        # Transform from center and width to exact coordinates
        b1_x1, b1_x2 = box1[:, 0] - box1[:, 2] / 2, box1[:, 0] + box1[:, 2] / 2
        b1_y1, b1_y2 = box1[:, 1] - box1[:, 3] / 2, box1[:, 1] + box1[:, 3] / 2
        b2_x1, b2_x2 = box2[:, 0] - box2[:, 2] / 2, box2[:, 0] + box2[:, 2] / 2
        b2_y1, b2_y2 = box2[:, 1] - box2[:, 3] / 2, box2[:, 1] + box2[:, 3] / 2
    else:
        # Get the coordinates of bounding boxes
        b1_x1, b1_y1, b1_x2, b1_y2 = box1[:, 0], box1[:, 1], box1[:, 2], box1[:, 3]
        b2_x1, b2_y1, b2_x2, b2_y2 = box2[:, 0], box2[:, 1], box2[:, 2], box2[:, 3]

    # get the coordinates of the intersection rectangle
    inter_rect_x1 = torch.max(b1_x1, b2_x1)
    inter_rect_y1 = torch.max(b1_y1, b2_y1)
    inter_rect_x2 = torch.min(b1_x2, b2_x2)
    inter_rect_y2 = torch.min(b1_y2, b2_y2)
    # Intersection area
    inter_area = torch.clamp(inter_rect_x2 - inter_rect_x1 + 1, min=0) * torch.clamp(
        inter_rect_y2 - inter_rect_y1 + 1, min=0
    )
    # Union Area
    b1_area = (b1_x2 - b1_x1 + 1) * (b1_y2 - b1_y1 + 1)
    b2_area = (b2_x2 - b2_x1 + 1) * (b2_y2 - b2_y1 + 1)

    iou = inter_area / (b1_area + b2_area - inter_area + 1e-16)

    return iou
Author: cf904c27 | Project: PyTorch-YOLOv3 | Lines: 31 | Source: utils.py


Example 9: rmac
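
R-MAC image descriptors: two torch.max reductions with keepdim=True max-pool each region of the feature map before L2 normalization and optional PCA whitening.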

def rmac(features, rmac_levels, pca=None):
    nim, nc, xd, yd = features.size()

    rmac_regions = image_helper.get_rmac_region_coordinates(xd, yd, rmac_levels)
    rmac_regions = rmac_regions.astype(int)  # np.int was removed in NumPy 1.24
    nr = len(rmac_regions)

    rmac_descriptors = []
    for x0, y0, w, h in rmac_regions:
        desc = features[:, :, y0:y0 + h, x0:x0 + w]
        desc = torch.max(desc, 2, keepdim=True)[0]
        desc = torch.max(desc, 3, keepdim=True)[0]
        # insert an additional dimension for the cat to work
        rmac_descriptors.append(desc.view(-1, 1, nc))

    rmac_descriptors = torch.cat(rmac_descriptors, 1)

    rmac_descriptors = normalize_L2(rmac_descriptors, 2)

    if pca is None:
        return rmac_descriptors

    # PCA + whitening
    npca = pca.n_components
    rmac_descriptors = pca.apply(rmac_descriptors.view(nr * nim, nc))
    rmac_descriptors = normalize_L2(rmac_descriptors, 1)

    rmac_descriptors = rmac_descriptors.view(nim, nr, npca)

    # Sum aggregation and L2-normalization
    rmac_descriptors = torch.sum(rmac_descriptors, 1)
    rmac_descriptors = normalize_L2(rmac_descriptors, 1)
    return rmac_descriptors
Author: gsx0 | Project: deepcluster | Lines: 33 | Source: eval_retrieval.py


Example 10: forward
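
A 3-D ROI max-pooling forward pass: three chained torch.max reductions pool each (w, h, l) bin, while the argmax coordinates are recorded for the backward pass.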

    def forward(self, features, rois):
        self.batch_size, self.num_channels, self.data_width, self.data_height, self.data_length = features.size()
        self.num_rois = rois.size()[0]
        self.remember_for_backward = torch.zeros(self.num_rois, self.num_channels, self.pooled_width, self.pooled_height, self.pooled_length, 3) - 1
        outputs = torch.zeros(self.num_rois, self.num_channels, self.pooled_width, self.pooled_height, self.pooled_length)

        for roi_ind, roi in enumerate(rois):
            
            roi_start_w, roi_start_h, roi_start_l, roi_end_w, roi_end_h, roi_end_l = roi.cpu().numpy() * self.spatial_scale

            roi_start_w = int(math.floor(roi_start_w))
            roi_start_h = int(math.floor(roi_start_h))
            roi_start_l = int(math.floor(roi_start_l))

            roi_end_w = int(math.ceil(roi_end_w))
            roi_end_h = int(math.ceil(roi_end_h))
            roi_end_l = int(math.ceil(roi_end_l))

            roi_width = max(roi_end_w - roi_start_w, 1)
            roi_height = max(roi_end_h - roi_start_h, 1)
            roi_length = max(roi_end_l - roi_start_l, 1)
            #roi_width = roi_end_w - roi_start_w
            #roi_height = roi_end_h - roi_start_h
            #roi_length = roi_end_l - roi_start_l
            #if roi_width < 1 or roi_height < 1 or roi_length < 1:
            #    continue

            bin_size_w = float(roi_width) / float(self.pooled_width)
            bin_size_h = float(roi_height) / float(self.pooled_height)
            bin_size_l = float(roi_length) / float(self.pooled_length)

            for pw in range(self.pooled_width):
                for ph in range(self.pooled_height):
                    for pl in range(self.pooled_length):
                        wstart = int(np.floor(pw * bin_size_w))
                        hstart = int(np.floor(ph * bin_size_h))
                        lstart = int(np.floor(pl * bin_size_l))

                        wend = int(np.ceil((pw + 1) * bin_size_w))
                        hend = int(np.ceil((ph + 1) * bin_size_h))
                        lend = int(np.ceil((pl + 1) * bin_size_l))

                        wstart = min(self.data_width, max(0, wstart + roi_start_w))
                        hstart = min(self.data_height, max(0, hstart + roi_start_h))
                        lstart = min(self.data_length, max(0, lstart + roi_start_l))

                        wend = min(self.data_width, max(0, wend + roi_start_w))
                        hend = min(self.data_height, max(0, hend + roi_start_h))
                        lend = min(self.data_length, max(0, lend + roi_start_l))

                        is_empty = (hend <= hstart) or (wend <= wstart) or (lend <= lstart)
                        if is_empty:
                            outputs[roi_ind, :, pw, ph, pl] = 0
                        else:
                            data = features[0]
                            outputs[roi_ind, :, pw, ph, pl] = torch.max(torch.max(torch.max(data[:, wstart:wend, hstart:hend, lstart:lend], 1)[0], 1)[0], 1)[0].view(-1)
                            for c in range(self.num_channels):
                                ind_w, ind_h, ind_l = np.unravel_index(data[c, wstart:wend, hstart:hend, lstart:lend].numpy().argmax(), data[c, wstart:wend, hstart:hend, lstart:lend].numpy().shape)
                                self.remember_for_backward[roi_ind, c, pw, ph, pl] = torch.from_numpy(np.array([ind_w+wstart, ind_h+hstart, ind_l+lstart])).float()
        return outputs
Author: caskeep | Project: 3D-SIS | Lines: 60 | Source: roi_pool.py


Example 11: test
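
The classic classification-accuracy idiom: the indices returned by torch.max(outputs, 1) are the predicted class labels.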

def test(net, dataloader, tag=''):
    correct = 0
    total = 0
    if tag == 'Train':
        dataTestLoader = dataloader.trainloader
    else:
        dataTestLoader = dataloader.testloader
    with torch.no_grad():
        for data in dataTestLoader:
            images, labels = data
            outputs = net(images)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()

    net.log('%s Accuracy of the network: %d %%' % (tag,
        100 * correct / total))

    class_correct = list(0. for i in range(10))
    class_total = list(0. for i in range(10))
    with torch.no_grad():
        for data in dataTestLoader:
            images, labels = data
            outputs = net(images)
            _, predicted = torch.max(outputs, 1)
            c = (predicted == labels).squeeze()
            for i in range(len(labels)):
                label = labels[i]
                class_correct[label] += c[i].item()
                class_total[label] += 1


    for i in range(10):
        net.log('%s Accuracy of %5s : %2d %%' % (
            tag, dataloader.classes[i], 100 * class_correct[i] / class_total[i]))
Author: emoyers | Project: vision-hw5 | Lines: 35 | Source: main.py


Example 12: forward
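
A span-overlap F1 loss (disabled in the source): torch.max and torch.min over stacked span endpoints compute the intersection of predicted and true spans.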

    def forward(self, y_pred, y_true, eps=1e-6):
        # disabled in the original source; the reference implementation below is unreachable
        raise NotImplementedError

        torch.nn.modules.loss._assert_no_grad(y_true)

        assert y_pred.shape[1] == 2

        same_left = torch.stack([y_true[:, 0], y_pred[:, 0]], dim=1)
        same_left, _ = torch.max(same_left, dim=1)

        same_right = torch.stack([y_true[:, 1], y_pred[:, 1]], dim=1)
        same_right, _ = torch.min(same_right, dim=1)

        same_len = same_right - same_left + 1   # (batch_size,)
        same_len = torch.stack([same_len, torch.zeros_like(same_len)], dim=1)
        same_len, _ = torch.max(same_len, dim=1)

        same_len = same_len.type(torch.float)

        pred_len = (y_pred[:, 1] - y_pred[:, 0] + 1).type(torch.float)
        true_len = (y_true[:, 1] - y_true[:, 0] + 1).type(torch.float)

        pre = same_len / (pred_len + eps)
        rec = same_len / (true_len + eps)

        f1 = 2 * pre * rec / (pre + rec + eps)

        return -torch.mean(f1)
Author: SerenaKhoo | Project: Match-LSTM | Lines: 28 | Source: loss.py


Example 13: mio_module
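
Max-in-out class scoring: an element-wise torch.max over the first three score channels collapses them into a single score before re-concatenation.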

    def mio_module(self, each_mmbox, len_conf):
        chunk = torch.chunk(each_mmbox, each_mmbox.shape[1], 1)
        bmax = torch.max(torch.max(chunk[0], chunk[1]), chunk[2])
        cls = torch.cat([bmax, chunk[3]], dim=1) if len_conf == 0 else torch.cat([chunk[3], bmax], dim=1)
        if len(chunk) == 6:
            cls = torch.cat([cls, chunk[4], chunk[5]], dim=1)
        elif len(chunk) == 8:
            cls = torch.cat([cls, chunk[4], chunk[5], chunk[6], chunk[7]], dim=1)
        return cls
Author: UGuess | Project: FaceDetection-DSFD | Lines: 9 | Source: face_ssd.py


Example 14: forward
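
A pointer-network answer decoder: torch.max(beta, 1) extracts the predicted start and end positions from the two attention distributions.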

    def forward(self, match_encoders):
        
        '''
        match_encoders (pn_steps, batch, hidden_size*2)
        '''
        vh_matrix = self.vh_net(match_encoders) # pn_steps, batch, hidden_size
        
        # prediction start
        h0 = Variable(torch.zeros(match_encoders.size(1), self.hidden_size)).cuda()
        c0 = Variable(torch.zeros(match_encoders.size(1), self.hidden_size)).cuda()
        
        wha1 = self.wa_net(h0) # batch, hidden_size
        wha1 = wha1.expand(match_encoders.size(0), wha1.size(0), wha1.size(1)) # pn_steps, batch, hidden_size
        #print ('_sum.size() ', _sum.size())
        #print ('vh_matrix.size() ', vh_matrix.size())
        f1 = self.tanh(vh_matrix + wha1) # pn_steps, batch, hidden_size
        #print ('f1.size() ', f1.size())
        vf1 = self.v_net(f1.transpose(0, 1)).squeeze(-1) #batch, pn_steps
        
        beta1 = self.softmax(vf1) #batch, pn_steps
        softmax_beta1 = self.softmax(beta1).view(beta1.size(0), 1, beta1.size(1)) #batch, 1, pn_steps
        
        inp = torch.bmm(softmax_beta1, match_encoders.transpose(0, 1)) # batch, 1, hidden_size
        inp = inp.squeeze(1) # batch, hidden_size
        
        h1, c1 = self.pointer_lstm(inp, (h0, c0))
        
        
        wha2 = self.wa_net(h1) # batch, hidden_size
        wha2 = wha2.expand(match_encoders.size(0), wha2.size(0), wha2.size(1)) # pn_steps, batch, hidden_size
        f2 = self.tanh(vh_matrix + wha2) # pn_steps, batch, hidden_size
        vf2 = self.v_net(f2.transpose(0, 1)).squeeze(-1) #batch, pn_steps
        
        beta2 = self.softmax(vf2)#batch, pn_steps
        softmax_beta2 = self.softmax(beta2).view(beta2.size(0), 1, beta2.size(1)) #batch, 1, pn_steps
        
        inp = torch.bmm(softmax_beta2, match_encoders.transpose(0, 1)) # batch, 1, hidden_size
        inp = inp.squeeze(1) # batch, hidden_size
        
        h2, c2 = self.pointer_lstm(inp, (h1, c1))
            
        _, start = torch.max(beta1, 1)
        _, end = torch.max(beta2, 1)
        
        beta1 = beta1.view(1, beta1.size(0), beta1.size(1))
        beta2 = beta2.view(1, beta2.size(0), beta2.size(1))
        
        logits = torch.cat([beta1, beta2])
        
        start = start.view(1, start.size(0))
        end = end.view(1, end.size(0))
        
        prediction = torch.cat([start, end]).transpose(0, 1).cpu().data.numpy()
        

        return logits, prediction
Author: xuwenshen | Project: Reading_Comprehension | Lines: 56 | Source: linear_match_lstm.py


Example 15: argmax
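
A small argmax helper: the second return value of torch.max is the index of the maximum, taken over dim 0 for vectors and dim 1 for matrices.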

def argmax(vec):
	"""
	Returns the arg max as an int
	"""
	if len(vec.size()) == 1:
		_, idx = torch.max(vec, 0)
	else:
		_, idx = torch.max(vec, 1)

	return idx.item()
Author: jworr | Project: ml_tools | Lines: 10 | Source: classifier.py


Example 16: updateOutput
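
From PyTorch's legacy nn Max module: torch.max writes the values and indices directly into preallocated buffers through the out= argument.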

    def updateOutput(self, input):
        self._lazyInit()
        dimension = self._getPositiveDimension(input)
        torch.max(input, dimension, out=(self._output, self._indices), keepdim=True)
        if input.dim() > 1:
            self.output.set_(self._output.select(dimension, 0))
        else:
            self.output.set_(self._output)

        return self.output
Author: Northrend | Project: pytorch | Lines: 10 | Source: Max.py


Example 17: dec
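
A greedy seq2seq decoder: at each step torch.max over the vocabulary logits yields the prediction that is fed back in when teacher forcing is off.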

    def dec(self, encoders, decoder_inputs, is_teacher_forcing, max_question_len):
        
        '''
        encoders (batch, hidden_size)
        if is_teacher_forcing: decoder_inputs (batch, max_question_len)
        if not is_teacher_forcing: decoder_inputs (batch, 1)
        '''
        decoder_inputs = Variable(decoder_inputs).long().cuda()
        decoder_inputs = self.embedding(decoder_inputs)
        decoder_inputs = decoder_inputs.transpose(0, 1)
        
        encoders = encoders.expand(decoder_inputs.size(0), encoders.size(0), self.hidden_size*2)
        inputs = torch.cat([decoder_inputs, encoders], -1)
        
        if is_teacher_forcing:
            
            outputs, hidden = self.dec_net(inputs)
            outputs = self.dropout(outputs)
            logits = self.fc_net(outputs) # qn_steps, batch, voc_size
            
            _, predictions = torch.max(logits.transpose(0, 1), -1) #batch, qn_steps
            predictions = predictions.cpu().data.numpy()
            
        else:
            logits = [0 for i in range(max_question_len)]
            predictions = [0 for i in range(max_question_len)]
            
            output, hidden = self.dec_net(inputs)
            output = self.dropout(output)
            logits[0] = self.fc_net(output)
            
            # the dim argument is required to get the (values, indices) pair
            _, index = torch.max(logits[0], -1)
            
            logits[0] = logits[0].view(1, decoder_inputs.size(1), self.voc_size) # 1,batch_size, voc_size
            predictions[0] = index.cpu().data.numpy() # batch_size
            
            for i in range(1, max_question_len):
                
                prev_output = Variable(predictions[i-1]).long().cuda()
                prev_output = self.embedding(prev_output)
                inputs = torch.cat([prev_output, encoders[0]], -1)
                
                output, hidden = self.dec_net(inputs, hidden)
                output = self.dropout(output)
                logits[i] = self.fc_net(output)

                _, index = torch.max(logits[i], -1)
                
                logits[i] = logits[i].view(1, decoder_inputs.size(0), self.voc_size) # 1,batch_size, voc_size
                predictions[i] = index.cpu().data.numpy() # batch_size
            
            logits = torch.cat(logits)# qn_steps, batch, voc_size
            predictions = np.array(predictions).transpose(1, 0)
            
        return logits, predictions
Author: xuwenshen | Project: Reading_Comprehension | Lines: 55 | Source: understand_passage.py


Example 18: eval_loss
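
Loss and accuracy evaluation: as in Example 11, the index output of torch.max(outputs.data, 1) gives the predicted class for both loss types.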

def eval_loss(net, criterion, loader, use_cuda=False):
    """
    Evaluate the loss value for a given 'net' on the dataset provided by the loader.

    Args:
        net: the neural net model
        criterion: loss function
        loader: dataloader
        use_cuda: use cuda or not
    Returns:
        loss value and accuracy
    """
    correct = 0
    total_loss = 0
    total = 0 # number of samples
    num_batch = len(loader)

    if use_cuda:
        net.cuda()
    net.eval()

    with torch.no_grad():
        if isinstance(criterion, nn.CrossEntropyLoss):
            for batch_idx, (inputs, targets) in enumerate(loader):
                batch_size = inputs.size(0)
                total += batch_size
                inputs = Variable(inputs)
                targets = Variable(targets)
                if use_cuda:
                    inputs, targets = inputs.cuda(), targets.cuda()
                outputs = net(inputs)
                loss = criterion(outputs, targets)
                total_loss += loss.item()*batch_size
                _, predicted = torch.max(outputs.data, 1)
                correct += predicted.eq(targets).sum().item()

        elif isinstance(criterion, nn.MSELoss):
            for batch_idx, (inputs, targets) in enumerate(loader):
                batch_size = inputs.size(0)
                total += batch_size
                inputs = Variable(inputs)

                one_hot_targets = torch.FloatTensor(batch_size, 10).zero_()
                one_hot_targets = one_hot_targets.scatter_(1, targets.view(batch_size, 1), 1.0)
                one_hot_targets = one_hot_targets.float()
                one_hot_targets = Variable(one_hot_targets)
                if use_cuda:
                    inputs, one_hot_targets = inputs.cuda(), one_hot_targets.cuda()
                outputs = F.softmax(net(inputs), dim=1)
                loss = criterion(outputs, one_hot_targets)
                total_loss += loss.item()*batch_size
                _, predicted = torch.max(outputs.data, 1)
                correct += predicted.cpu().eq(targets).sum().item()

    return total_loss/total, 100.*correct/total
Author: anirband | Project: loss-landscape | Lines: 55 | Source: evaluation.py


Example 19: blur_frame
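
Here torch.max serves only as a range check on image tensors before a box-filter blur implemented with F.conv2d.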

    def blur_frame(self, frame):
        # sanity check: frame values are expected to lie in [0, 1]
        if torch.max(frame) > 1.:
            raise ValueError('expected frame values <= 1, got max %f'
                             % torch.max(frame).data.cpu().numpy())

        K = 21        # box-filter kernel size
        padding = 10  # (K - 1) / 2 keeps the spatial size unchanged
        filter_weights = torch.ones(1, 1, K, K).cuda() / K ** 2  # mean filter

        # blur each colour channel independently with the same box filter
        frame_c0 = F.conv2d(input=frame[:, 0].unsqueeze(1), weight=filter_weights,
                            bias=None, padding=padding, stride=1, dilation=1)
        frame_c1 = F.conv2d(input=frame[:, 1].unsqueeze(1), weight=filter_weights,
                            bias=None, padding=padding, stride=1, dilation=1)
        frame_c2 = F.conv2d(input=frame[:, 2].unsqueeze(1), weight=filter_weights,
                            bias=None, padding=padding, stride=1, dilation=1)

        blurred_image = torch.stack([frame_c0, frame_c1, frame_c2], dim=1)
        blurred_image = blurred_image.squeeze(dim=2)  # [B, 3, 480, 640]

        # a mean filter cannot exceed the input range, but clamp defensively
        blurred_image = torch.clamp(blurred_image, max=1.0)

        return blurred_image
Author: chriscremer | Project: Other_Code | Lines: 55 | Source: vae_doom_withblur.py


Example 20: forward
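
An InferSent-style encoder with intra-attention: max-pooling over time, torch.max(sent_output, 1)[0], builds the sentence summary that seeds the attention query.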

    def forward(self, sent_tuple):
        # sent_len: [max_len, ..., min_len] (batch)
        # sent: Variable(seqlen x batch x worddim)

        sent, sent_len = sent_tuple
        bsize = sent.size(1)

        self.init_lstm = self.init_lstm if bsize == self.init_lstm.size(1) else \
                Variable(torch.FloatTensor(2, bsize, self.enc_lstm_dim).zero_()).cuda()

        # Sort by length (keep idx)
        sent_len, idx_sort = np.sort(sent_len)[::-1], np.argsort(-sent_len)
        sent = sent.index_select(1, Variable(torch.cuda.LongTensor(idx_sort)))
        # Handling padding in Recurrent Networks
        sent_packed = nn.utils.rnn.pack_padded_sequence(sent, sent_len)
        sent_output = self.enc_lstm(sent_packed,
                                    (self.init_lstm, self.init_lstm))[0]
        # seqlen x batch x 2*nhid
        sent_output = nn.utils.rnn.pad_packed_sequence(sent_output)[0]
        # Un-sort by length
        idx_unsort = np.argsort(idx_sort)
        sent_output = sent_output.index_select(1,
            Variable(torch.cuda.LongTensor(idx_unsort)))

        sent_output = sent_output.transpose(0,1).contiguous()

        sent_output_proj = self.proj_lstm(sent_output.view(-1,
            2*self.enc_lstm_dim)).view(bsize, -1, 2*self.enc_lstm_dim)

        sent_keys = self.proj_enc(sent_output.view(-1,
            2*self.enc_lstm_dim)).view(bsize, -1, 2*self.enc_lstm_dim)

        sent_max = torch.max(sent_output, 1)[0].squeeze(1)  # (bsize, 2*nhid)
        sent_summary = self.proj_query(
                       sent_max).unsqueeze(1).expand_as(sent_keys)
        # (bsize, seqlen, 2*nhid)

        sent_M = torch.tanh(sent_keys + sent_summary)
        # (bsize, seqlen, 2*nhid); Yang et al.: M = tanh(W h_i + W h_avg)
        sent_w = self.query_embedding(Variable(torch.LongTensor(
            bsize*[0]).cuda())).unsqueeze(2)  # (bsize, 2*nhid, 1)

        sent_alphas = self.softmax(sent_M.bmm(sent_w).squeeze(2)).unsqueeze(1)
        # (bsize, 1, seqlen)

        if int(time.time()) % 200 == 0:
            print('w', torch.max(sent_w[0]), torch.min(sent_w[0]))
            print('alphas', sent_alphas[0][0][0:sent_len[0]])
        # Get attention vector
        emb = sent_alphas.bmm(sent_output_proj).squeeze(1)

        return emb
Author: pemazare | Project: InferSent | Lines: 52 | Source: models.py



Note: the torch.max examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors, and distribution and use should follow each project's License. Do not repost without permission.

