• 设为首页
  • 点击收藏
  • 手机版
    手机扫一扫访问
    迪恩网络手机版
  • 关注官方公众号
    微信扫一扫关注
    公众号

Python torch.arange函数代码示例

原作者: [db:作者] 来自: [db:来源] 收藏 邀请

本文整理汇总了Python中torch.arange函数的典型用法代码示例。如果您正苦于以下问题:Python arange函数的具体用法?Python arange怎么用?Python arange使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。



在下文中一共展示了arange函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。

示例1: test_broadcast_subspace

 def test_broadcast_subspace(self):
     """Index-assignment broadcasts a column vector across the selected rows.

     Row ``rev[i]`` of ``target`` receives row ``i`` of ``vals``; the result
     must equal the reversed indices broadcast over every column.
     """
     target = zeros((100, 100))
     vals = Variable(torch.arange(0, 100))[:, None]
     rev = Variable(torch.arange(99, -1, -1).long())
     target[rev] = vals
     self.assertEqual(target, rev.double().unsqueeze(1).expand(100, 100))
开发者ID:bhuWenDongchao,项目名称:pytorch,代码行数:7,代码来源:test_indexing.py


示例2: __call__

    def __call__(self, spec_f):
        """Project a spectrogram onto a bank of triangular mel filters.

        Converts [self.f_min, self.f_max] to the HTK mel scale, places
        ``self.n_mels`` triangular filters over the FFT bins, and matmuls the
        input against the resulting (n_fft, n_mels) filterbank.

        Args:
            spec_f: spectrogram tensor or Variable; the code indexes
                size(2) as n_fft and the final matmul comment documents the
                assumed (c, l, n_fft) layout.

        Returns:
            Mel spectrogram of shape (c, l, n_mels); a Variable only if the
            input was one.
        """

        spec_f, is_variable = _check_is_variable(spec_f)
        n_fft = spec_f.size(2)

        # Hz -> mel (HTK formula) for both frequency limits.
        m_min = 0. if self.f_min == 0 else 2595 * np.log10(1. + (self.f_min / 700))
        m_max = 2595 * np.log10(1. + (self.f_max / 700))

        # n_mels + 2 equally spaced mel points: filter m spans points
        # [m-1, m, m+1] after conversion back to Hz.
        m_pts = torch.linspace(m_min, m_max, self.n_mels + 2)
        f_pts = (700 * (10**(m_pts / 2595) - 1))

        # Map corner frequencies to FFT bin indices.
        # NOTE(review): (n_fft - 1) * 2 treats n_fft as the one-sided bin
        # count of the underlying FFT — confirm against the STFT producing
        # spec_f.
        bins = torch.floor(((n_fft - 1) * 2) * f_pts / self.sr).long()

        fb = torch.zeros(n_fft, self.n_mels)
        for m in range(1, self.n_mels + 1):
            f_m_minus = bins[m - 1].item()
            f_m = bins[m].item()
            f_m_plus = bins[m + 1].item()

            # Rising then falling slope of the m-th triangle; zero-width
            # sides (duplicate bin indices) are skipped.
            if f_m_minus != f_m:
                fb[f_m_minus:f_m, m - 1] = (torch.arange(f_m_minus, f_m) - f_m_minus) / (f_m - f_m_minus)
            if f_m != f_m_plus:
                fb[f_m:f_m_plus, m - 1] = (f_m_plus - torch.arange(f_m, f_m_plus)) / (f_m_plus - f_m)

        fb = Variable(fb)
        spec_m = torch.matmul(spec_f, fb)  # (c, l, n_fft) dot (n_fft, n_mels) -> (c, l, n_mels)
        return spec_m if is_variable else spec_m.data
开发者ID:SsnL,项目名称:audio,代码行数:27,代码来源:transforms.py


示例3: __call__

    def __call__(self, grid):
        """Prepend normalized x/y/z coordinate channels to a (B, C, X, Y, Z)
        grid, plus an optional radial-distance channel when ``self.with_r``.
        """
        batch_size, _, dim_x, dim_y, dim_z = grid.size()

        scale = 1.0

        # Each axis index is mapped to [-1, 1] and broadcast over the volume.
        xs = torch.arange(dim_x, dtype=torch.float32)
        ys = torch.arange(dim_y, dtype=torch.float32)
        zs = torch.arange(dim_z, dtype=torch.float32)
        x_coords = 2.0 * scale * xs.unsqueeze(1).unsqueeze(1).expand(
            dim_x, dim_y, dim_z) / (dim_x - 1.0) - 1.0
        y_coords = 2.0 * scale * ys.unsqueeze(1).unsqueeze(0).expand(
            dim_x, dim_y, dim_z) / (dim_y - 1.0) - 1.0
        z_coords = 2.0 * scale * zs.unsqueeze(0).unsqueeze(0).expand(
            dim_x, dim_y, dim_z) / (dim_z - 1.0) - 1.0

        coords = torch.stack((x_coords, y_coords, z_coords), dim=0)

        if self.with_r:
            # Radial distance, rescaled so its maximum equals `scale`.
            radius = (x_coords ** 2 + y_coords ** 2 + z_coords ** 2) ** 0.5
            radius = scale * radius / torch.max(radius)
            coords = torch.cat((coords, radius.unsqueeze(0)), dim=0)

        coords = coords.unsqueeze(0).repeat(batch_size, 1, 1, 1, 1)
        return torch.cat((coords.to(grid.device), grid), dim=1)
开发者ID:caskeep,项目名称:3D-SIS,代码行数:25,代码来源:coord_conv3d.py


示例4: meshgrid

def meshgrid(x, y, row_major=True):
    '''Build a flat meshgrid of integer coordinates over an x-by-y range.

    Args:
      x: (int) first dim range.
      y: (int) second dim range.
      row_major: (bool) if True the first column varies fastest, otherwise
        the second column does.

    Returns:
      (tensor) meshgrid, sized [x*y, 2].

    Example:
    >> meshgrid(3,2)
    0  0
    1  0
    2  0
    0  1
    1  1
    2  1

    >> meshgrid(3,2,row_major=False)
    0  0
    0  1
    0  2
    1  0
    1  1
    1  2
    '''
    first = torch.arange(0, x)
    second = torch.arange(0, y)
    # Tile the fast axis y times; repeat each slow-axis value x times.
    fast_col = first.repeat(y).unsqueeze(1)
    slow_col = second.unsqueeze(1).repeat(1, x).reshape(-1, 1)
    if row_major:
        return torch.cat([fast_col, slow_col], 1)
    return torch.cat([slow_col, fast_col], 1)
开发者ID:hopstone,项目名称:pytorch-retinanet,代码行数:35,代码来源:utils.py


示例5: make_positions

def make_positions(tensor, padding_idx, left_pad, onnx_trace=False):
    """Replace non-padding symbols with their position numbers.

    Position numbers begin at padding_idx+1.

    Padding symbols are ignored, but it is necessary to specify whether padding
    is added on the left side (left_pad=True) or right side (left_pad=False).

    Args:
        tensor: 2-D (batch, seq) tensor of token indices.
        padding_idx: index marking padding entries.
        left_pad: True if each row is padded on the left.
        onnx_trace: use only ONNX-exportable ops when True.

    Returns:
        Tensor of the same shape where non-pad entries hold their position
        number and pad entries keep their original (padding) value.
    """
    if onnx_trace:
        range_buf = torch._dim_arange(like=tensor, dim=1) + padding_idx + 1
        mask = tensor.ne(padding_idx)
        positions = range_buf.expand_as(tensor)
        if left_pad:
            positions = positions - mask.size(1) + mask.long().sum(dim=1).unsqueeze(1)
        # BUG FIX: padding slots must be filled with padding_idx. The original
        # `positions * mask.long() + positions * (1 - mask.long())` reduced to
        # plain `positions`, making the mask a no-op.
        return positions * mask.long() + padding_idx * (1 - mask.long())

    max_pos = padding_idx + 1 + tensor.size(1)
    # Cache the position buffer on the function object so repeated calls
    # reuse a single allocation.
    if not hasattr(make_positions, 'range_buf'):
        make_positions.range_buf = tensor.new()
    make_positions.range_buf = make_positions.range_buf.type_as(tensor)
    if make_positions.range_buf.numel() < max_pos:
        torch.arange(padding_idx + 1, max_pos, out=make_positions.range_buf)
    mask = tensor.ne(padding_idx)
    positions = make_positions.range_buf[:tensor.size(1)].expand_as(tensor)
    if left_pad:
        # Shift positions right so the first non-pad token gets padding_idx+1.
        positions = positions - mask.size(1) + mask.long().sum(dim=1).unsqueeze(1)
    # Scatter positions into the non-pad slots; pads keep their value.
    return tensor.clone().masked_scatter_(mask, positions[mask])
开发者ID:fyabc,项目名称:fairseq,代码行数:27,代码来源:utils.py


示例6: test_cuda_small_tensors

    def test_cuda_small_tensors(self):
        # Check multiple small tensors which will likely use the same
        # underlying cached allocation
        #
        # Five 5-element CUDA tensors, alternating between two devices, are
        # sent through a spawned worker process; each must come back with the
        # correct sum, device index, and an exact-size storage (no slack from
        # the caching allocator leaking across process boundaries).
        ctx = mp.get_context('spawn')  # 'spawn' is required for CUDA tensors
        tensors = []
        for i in range(5):
            device = i % 2  # alternate between GPU 0 and GPU 1
            tensors += [torch.arange(i * 5, (i + 1) * 5).cuda(device)]

        inq = ctx.Queue()
        outq = ctx.Queue()
        inq.put(tensors)
        # Worker reads the tensor list from inq and reports one
        # (sum, device, tensor_size, storage_size) tuple per tensor on outq.
        p = ctx.Process(target=sum_tensors, args=(inq, outq))
        p.start()

        results = []
        for i in range(5):
            results.append(outq.get())
        p.join()

        for i, tensor in enumerate(tensors):
            v, device, tensor_size, storage_size = results[i]
            self.assertEqual(v, torch.arange(i * 5, (i + 1) * 5).sum())
            self.assertEqual(device, i % 2)
            self.assertEqual(tensor_size, 5)
            self.assertEqual(storage_size, 5)


示例7: test_int_assignment

    def test_int_assignment(self):
        x = Variable(torch.arange(0, 4).view(2, 2))
        x[1] = 5
        self.assertEqual(x.data.tolist(), [[0, 1], [5, 5]])

        x = Variable(torch.arange(0, 4).view(2, 2))
        x[1] = Variable(torch.arange(5, 7))
        self.assertEqual(x.data.tolist(), [[0, 1], [5, 6]])
开发者ID:bhuWenDongchao,项目名称:pytorch,代码行数:8,代码来源:test_indexing.py


示例8: test_int_assignment

    def test_int_assignment(self):
        x = torch.arange(0, 4).view(2, 2)
        x[1] = 5
        self.assertEqual(x.tolist(), [[0, 1], [5, 5]])

        x = torch.arange(0, 4).view(2, 2)
        x[1] = torch.arange(5, 7)
        self.assertEqual(x.tolist(), [[0, 1], [5, 6]])
开发者ID:RichieMay,项目名称:pytorch,代码行数:8,代码来源:test_indexing.py


示例9: test_byte_tensor_assignment

 def test_byte_tensor_assignment(self):
     """A byte mask selects rows for assignment; unmasked rows are untouched."""
     grid = Variable(torch.arange(0, 16).view(4, 4))
     row_mask = Variable(torch.ByteTensor([True, False, True, False]))
     row_vals = Variable(torch.Tensor([3, 4, 5, 6]))
     grid[row_mask] = row_vals
     # Rows 0 and 2 were overwritten; rows 1 and 3 keep their arange values.
     self.assertEqual(grid[0], row_vals)
     self.assertEqual(grid[1].data, torch.arange(4, 8))
     self.assertEqual(grid[2], row_vals)
     self.assertEqual(grid[3].data, torch.arange(12, 16))
开发者ID:bhuWenDongchao,项目名称:pytorch,代码行数:9,代码来源:test_indexing.py


示例10: enumerate_support

 def enumerate_support(self):
     """Enumerate the support 0..total_count, broadcast over the batch shape.

     Only a homogeneous ``total_count`` is supported; otherwise a
     NotImplementedError is raised.
     """
     max_count = int(self.total_count.max())
     if not self.total_count.min() == max_count:
         raise NotImplementedError("Inhomogeneous total count not supported by `enumerate_support`.")
     support = self._new(1 + max_count,)
     torch.arange(1 + max_count, out=support)
     # Shape (1 + max_count, 1, ..., 1) then broadcast across the batch dims.
     support = support.view((-1,) + (1,) * len(self._batch_shape))
     return support.expand((-1,) + self._batch_shape)
开发者ID:lewisKit,项目名称:pyro,代码行数:9,代码来源:binomial.py


示例11: test_byte_tensor_assignment

 def test_byte_tensor_assignment(self):
     """A byte mask selects rows of a float tensor for assignment."""
     grid = torch.arange(0., 16).view(4, 4)
     row_mask = torch.ByteTensor([True, False, True, False])
     row_vals = torch.tensor([3., 4., 5., 6.])
     grid[row_mask] = row_vals
     # Rows 0 and 2 were overwritten; rows 1 and 3 keep their arange values.
     self.assertEqual(grid[0], row_vals)
     self.assertEqual(grid[1], torch.arange(4, 8))
     self.assertEqual(grid[2], row_vals)
     self.assertEqual(grid[3], torch.arange(12, 16))
开发者ID:RichieMay,项目名称:pytorch,代码行数:9,代码来源:test_indexing.py


示例12: __init__

 def __init__(self, train_size, batch_size):
     """Batch-sampler bookkeeping: whole batches plus an optional leftover tail."""
     self.num_data = train_size
     self.batch_size = batch_size
     self.num_per_batch = int(train_size / batch_size)
     # One row of within-batch offsets [0, batch_size), reused per batch.
     self.range = torch.arange(0, batch_size).view(1, batch_size).long()
     leftover_count = train_size % batch_size
     self.leftover_flag = False
     if leftover_count:
         # Trailing indices that do not fill a complete batch.
         self.leftover = torch.arange(self.num_per_batch * batch_size, train_size).long()
         self.leftover_flag = True
开发者ID:lianDaniel,项目名称:R-FCN.pytorch,代码行数:9,代码来源:trainval_net.py


示例13: backward

 def backward(ctx, grad_output):
     """Backward of unfold: scatter-add window gradients into input slots.

     Flat input indices are unfolded exactly like the forward pass so each
     window-gradient element knows which input position it came from;
     overlapping windows accumulate via index_add.
     """
     flat_idx = grad_output.data.new().long()
     torch.arange(0, ctx.input_numel, out=flat_idx)
     window_idx = flat_idx.view(ctx.input_size).unfold(ctx.dim, ctx.size, ctx.step)
     window_idx = window_idx.contiguous().view(-1)
     grad_input = Variable(grad_output.data.new(ctx.input_numel).zero_())
     grad_input = grad_input.index_add(
         0, Variable(window_idx), grad_output.contiguous().view(-1))
     # Only the input receives a gradient; dim/size/step are non-tensors.
     return grad_input.view(ctx.input_size), None, None, None
开发者ID:athiwatp,项目名称:pytorch,代码行数:10,代码来源:tensor.py


示例14: __call__

    def __call__(self, image):
        """Prepend two coordinate channels to a (C, H, W) image tensor.

        NOTE(review): coordinates are divided by a fixed 255.0, so they only
        span [-1, 1] when the image is 256 pixels along that axis — confirm
        this is intentional for the dataset's fixed image size.
        """
        h, w = self.image_height, self.image_width
        row_coords = 2.0 * torch.arange(h).unsqueeze(1).expand(h, w) / 255.0 - 1.0
        col_coords = 2.0 * torch.arange(w).unsqueeze(0).expand(h, w) / 255.0 - 1.0
        coords = torch.stack((row_coords, col_coords), dim=0)
        return torch.cat((coords, image), dim=0)
开发者ID:davnov134,项目名称:instance-segmentation-pytorch,代码行数:11,代码来源:preprocess.py


示例15: __init__

    def __init__(self, d_model, dropout, max_len=5000):
        """Precompute sinusoidal positional encodings, shape (1, max_len, d_model)."""
        super(PositionalEncoding, self).__init__()
        self.dropout = torch.nn.Dropout(p=dropout)

        position = torch.arange(0., max_len).unsqueeze(1)
        # Geometric frequency ladder 10000^(-2i/d_model) for even channels.
        div_term = torch.exp(torch.arange(0., d_model, 2) * -(math.log(10000.0) / d_model))
        encoding = torch.zeros(max_len, d_model)
        encoding[:, 0::2] = torch.sin(position * div_term)  # even channels
        encoding[:, 1::2] = torch.cos(position * div_term)  # odd channels
        self.register_buffer("pe", encoding.unsqueeze(0))
开发者ID:daixiangau,项目名称:naacl2019-select-pretraining-data-for-ner,代码行数:11,代码来源:transformer.py


示例16: __init__

 def __init__(self, roi_size = 128, n_segments = 49):
     """Fixed X/Y pixel-coordinate grids for a roi_size x roi_size window,
     stored as frozen (non-trainable) parameters."""
     super().__init__()

     self.roi_size = roi_size
     self.n_segments = n_segments

     # Column indices (X) and row indices (Y), broadcast to (1, 1, roi, roi).
     cols = torch.arange(0, roi_size).view(1, -1).expand(1, 1, roi_size, roi_size)
     rows = torch.arange(0, roi_size).view(-1, 1).expand(1, 1, roi_size, roi_size)
     self.X_grid = nn.Parameter(cols.contiguous(), requires_grad=False)
     self.Y_grid = nn.Parameter(rows.contiguous(), requires_grad=False)
开发者ID:ver228,项目名称:Work_In_Progress,代码行数:11,代码来源:worm_models.py


示例17: __init__

    def __init__(self, input_dim: int, max_len: int = 5000) -> None:
        """Precompute sinusoidal positional encodings and register them as a buffer."""
        super().__init__()

        # Compute the positional encodings once in log space.
        position = torch.arange(0, max_len).unsqueeze(1).float()
        half_freqs = torch.arange(0, input_dim, 2).float()
        div_term = torch.exp(half_freqs * -(math.log(10000.0) / input_dim))
        table = torch.zeros(max_len, input_dim, requires_grad=False)
        table[:, 0::2] = torch.sin(position * div_term)  # even channels
        table[:, 1::2] = torch.cos(position * div_term)  # odd channels
        self.register_buffer('positional_encoding', table.unsqueeze(0))
开发者ID:apmoore1,项目名称:allennlp,代码行数:11,代码来源:bidirectional_language_model_transformer.py


示例18: findLR

    def findLR(self, model, optimizer, writer,
               start_lr=1e-7, end_lr=10, num_iters=50):
        """Learning-rate range test.

        Sweeps the LR log-uniformly from ``start_lr`` to ``end_lr`` over
        ``num_iters`` optimization steps, recording the training loss at each
        rate, then logs a loss-vs-LR figure to ``writer``.

        Args:
            model: network mapping input images to (XYZ, maskLogit).
            optimizer: optimizer whose param-group LRs are overwritten in place.
            writer: summary writer receiving the 'findLR' figure.
            start_lr: lower endpoint of the sweep (log scale).
            end_lr: upper endpoint of the sweep (log scale).
            num_iters: number of LR samples / training steps.
        """
        model.train()

        losses = []
        lrs = np.logspace(np.log10(start_lr), np.log10(end_lr), num_iters)

        for lr in lrs:
            # Update LR
            for group in optimizer.param_groups: group['lr'] = lr

            # NOTE(review): iter(...) is rebuilt every pass, so each step uses
            # the loader's first batch — presumably intended for a quick sweep;
            # confirm.
            batch = next(iter(self.data_loaders[0]))
            input_images, depthGT, maskGT = utils.unpack_batch_fixed(batch, self.cfg.device)
            # ------ define ground truth------
            XGT, YGT = torch.meshgrid([torch.arange(self.cfg.outH), # [H,W]
                                       torch.arange(self.cfg.outW)]) # [H,W]
            XGT, YGT = XGT.float(), YGT.float()
            XYGT = torch.cat([
                XGT.repeat([self.cfg.outViewN, 1, 1]), 
                YGT.repeat([self.cfg.outViewN, 1, 1])], dim=0) #[2V,H,W]
            XYGT = XYGT.unsqueeze(dim=0).to(self.cfg.device) #[1,2V,H,W]

            with torch.set_grad_enabled(True):
                optimizer.zero_grad()

                XYZ, maskLogit = model(input_images)
                # Split the prediction into per-view XY maps and depth maps.
                XY = XYZ[:, :self.cfg.outViewN * 2, :, :]
                depth = XYZ[:, self.cfg.outViewN * 2:self.cfg.outViewN * 3, :,  :]
                mask = (maskLogit > 0).byte()
                # ------ Compute loss ------
                loss_XYZ = self.l1(XY, XYGT)
                loss_XYZ += self.l1(depth.masked_select(mask),
                                    depthGT.masked_select(mask))
                loss_mask = self.sigmoid_bce(maskLogit, maskGT)
                loss = loss_mask + self.cfg.lambdaDepth * loss_XYZ

                # Update weights
                loss.backward()
                # True Weight decay
                # NOTE(review): the positional Tensor.add(alpha, tensor)
                # overload is deprecated in newer torch; this applies decoupled
                # weight decay, param -= trueWD * lr * param.
                if self.cfg.trueWD is not None:
                    for group in optimizer.param_groups:
                        for param in group['params']:
                            param.data = param.data.add(
                                -self.cfg.trueWD * group['lr'], param.data)
                optimizer.step()

            losses.append(loss.item())

        fig, ax = plt.subplots()
        ax.plot(lrs, losses)
        ax.set_xlabel('learning rate')
        ax.set_ylabel('loss')
        ax.set_xscale('log')
        writer.add_figure('findLR', fig)
开发者ID:wkflyerman,项目名称:pytorch-3d-point-cloud-generation,代码行数:54,代码来源:trainer.py


示例19: get_subtree

def get_subtree(tree, actions, batch_size, num_actions):
    """Select, for each batch element, the child subtree reached by its action.

    ``tree`` is a list of flat per-depth tensors (the root level is skipped);
    level ``i`` after the root holds num_actions**i nodes per batch element.
    """
    chosen = actions[:, 0]
    subtree = []
    for depth, level in enumerate(tree[1:]):
        # Start offset of each batch element's segment within this level.
        seg_starts = cudify(torch.arange(0, batch_size) * level.size(0) / batch_size)
        span = num_actions ** depth
        per_batch = []
        for b in range(batch_size):
            per_batch.append(
                cudify(torch.arange(chosen[b] * span, (chosen[b] + 1) * span)) + seg_starts[b])
        gather_idx = torch.cat(per_batch).long()
        subtree.append(level[gather_idx])
    return subtree
开发者ID:tony32769,项目名称:treeqn,代码行数:12,代码来源:treeqn_utils.py


示例20: __init__

 def __init__(self, dropout, dim, max_len=5000):
     """Sinusoidal positional encodings shaped (max_len, 1, dim), plus dropout."""
     # The table is built before the parent __init__ runs (original order kept;
     # register_buffer is only valid after super().__init__()).
     position = torch.arange(0, max_len).unsqueeze(1)
     div_term = torch.exp(torch.arange(0, dim, 2) *
                          -(math.log(10000.0) / dim))
     table = torch.zeros(max_len, dim)
     table[:, 0::2] = torch.sin(position * div_term)  # even channels
     table[:, 1::2] = torch.cos(position * div_term)  # odd channels
     table = table.unsqueeze(1)
     super(PositionalEncoding, self).__init__()
     self.register_buffer('pe', table)
     self.dropout = nn.Dropout(p=dropout)
     self.dim = dim
开发者ID:xiamengzhou,项目名称:OpenNMT-py,代码行数:12,代码来源:Embeddings.py



注:本文中的torch.arange函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。


鲜花

握手

雷人

路过

鸡蛋
该文章已有0人参与评论

请发表评论

全部评论

专题导读
上一篇:
Python torch.argmax函数代码示例发布时间:2022-05-27
下一篇:
Python torch.add函数代码示例发布时间:2022-05-27
热门推荐
阅读排行榜

扫描微信二维码

查看手机版网站

随时了解更新最新资讯

139-2527-9053

在线客服(服务时间 9:00~18:00)

在线QQ客服
地址:深圳市南山区西丽大学城创智工业园
电邮:jeky_zhao#qq.com
移动电话:139-2527-9053

Powered by 互联科技 X3.4 © 2001-2023 极客世界. | Sitemap