
Python torch.randperm Function Code Examples


This article collects typical usage examples of the torch.randperm function in Python. If you are wondering what randperm does, how to call it, or what real-world usage looks like, the curated function examples here should help.



Twenty code examples of the randperm function are presented below, sorted by popularity by default.
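
Before the project examples, here is a minimal sketch of the function itself: torch.randperm(n) returns a random permutation of the integers 0 to n-1 as a LongTensor, and the most common pattern, as most of the examples below show, is to use that permutation as a fancy index to shuffle data and labels together. The tensor shapes and the seed in this sketch are illustrative assumptions, not taken from any example below.

import torch

# a random permutation of 0..9 (dtype torch.int64)
perm = torch.randperm(10)

# shuffle data and labels with the same permutation so pairs stay aligned
x = torch.randn(10, 3)
y = torch.arange(10)
x_shuffled, y_shuffled = x[perm], y[perm]

# reproducible shuffling via an explicit generator (seed chosen arbitrarily)
g = torch.Generator()
g.manual_seed(0)
perm_fixed = torch.randperm(10, generator=g)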

Example 1: random

        def random(nin, nout, nto):
            nker = nto * nout
            tbl = torch.Tensor(nker, 2)
            fi = torch.randperm(nin)
            frcntr = 0
            nfi = math.floor(nin / nto)  # number of distinct nto chunks
            totbl = tbl.select(1, 1)
            frtbl = tbl.select(1, 0)
            fitbl = fi.narrow(0, 0, (nfi * nto))  # part of fi that covers distinct chunks
            ufrtbl = frtbl.unfold(0, nto, nto)
            utotbl = totbl.unfold(0, nto, nto)
            ufitbl = fitbl.unfold(0, nto, nto)

            # start filling frtbl
            for i in range(nout):  # for each unit in the target map
                ufrtbl.select(0, i).copy_(ufitbl.select(0, frcntr))
                frcntr += 1
                if frcntr - 1 == nfi:  # reset fi
                    fi.copy_(torch.randperm(nin))
                    frcntr = 1

            for tocntr in range(utotbl.size(0)):
                utotbl.select(0, tocntr).fill_(tocntr)

            return tbl
Author: Jsmilemsj | Project: pytorch | Lines: 25 | Source: SpatialConvolutionMap.py


Example 2: main

def main(args):
    pyro.clear_param_store()
    data = build_linear_dataset(N, p)
    if args.cuda:
        # make tensors and modules CUDA
        data = data.cuda()
        softplus.cuda()
        regression_model.cuda()
    for j in range(args.num_epochs):
        if args.batch_size == N:
            # use the entire data set
            epoch_loss = svi.step(data)
        else:
            # mini batch
            epoch_loss = 0.0
            perm = torch.randperm(N) if not args.cuda else torch.randperm(N).cuda()
            # shuffle data
            data = data[perm]
            # get indices of each batch
            all_batches = get_batch_indices(N, args.batch_size)
            for ix, batch_start in enumerate(all_batches[:-1]):
                batch_end = all_batches[ix + 1]
                batch_data = data[batch_start: batch_end]
                epoch_loss += svi.step(batch_data)
        if j % 100 == 0:
            print("epoch avg loss {}".format(epoch_loss/float(N)))
Author: lewisKit | Project: pyro | Lines: 26 | Source: bayesian_regression.py


Example 3: train_scene_discriminator

def train_scene_discriminator(x):
    netC.zero_grad()

    if has_cuda:
        target = torch.cuda.FloatTensor(opt.batch_size, 1)
    else:
        target = torch.FloatTensor(opt.batch_size, 1)

    x1 = x[0]
    x2 = x[1]
    h_p1 = netEP(x1).detach()
    h_p2 = netEP(x2).detach()

    half = int(opt.batch_size/2)
    if has_cuda:
        rp = torch.randperm(half).cuda()
    else:
        rp = torch.randperm(half).cpu()
    h_p2[:half] = h_p2[rp]
    target[:half] = 1
    target[half:] = 0

    out = netC([h_p1, h_p2])
    bce = bce_criterion(out, Variable(target))

    bce.backward()
    optimizerC.step()

    acc = out[:half].gt(0.5).sum() + out[half:].le(0.5).sum()
    return bce.data.cpu().numpy(), acc.data.cpu().numpy()/opt.batch_size
Author: ZhenyueQin | Project: drnet-py | Lines: 30 | Source: train_drnet.py


Example 4: main

def main():
    parser = argparse.ArgumentParser(description="parse args")
    parser.add_argument('-n', '--num-epochs', default=1000, type=int)
    parser.add_argument('-b', '--batch-size', default=N, type=int)
    parser.add_argument('--cuda', action='store_true')
    args = parser.parse_args()
    data = build_linear_dataset(N, p)
    if args.cuda:
        # make tensors and modules CUDA
        data = data.cuda()
        softplus.cuda()
        regression_model.cuda()
    for j in range(args.num_epochs):
        if args.batch_size == N:
            # use the entire data set
            epoch_loss = svi.step(data)
        else:
            # mini batch
            epoch_loss = 0.0
            perm = torch.randperm(N) if not args.cuda else torch.randperm(N).cuda()
            # shuffle data
            data = data[perm]
            # get indices of each batch
            all_batches = get_batch_indices(N, args.batch_size)
            for ix, batch_start in enumerate(all_batches[:-1]):
                batch_end = all_batches[ix + 1]
                batch_data = data[batch_start: batch_end]
                epoch_loss += svi.step(batch_data)
        if j % 100 == 0:
            print("epoch avg loss {}".format(epoch_loss/float(N)))
Author: Magica-Chen | Project: pyro | Lines: 30 | Source: bayesian_regression.py


Example 5: mixup_data

def mixup_data(x, y, alpha=1.0, use_cuda=True):
    if alpha > 0.:
        lam = np.random.beta(alpha, alpha)
    else:
        lam = 1.
    batch_size = x.size()[0]
    if use_cuda:
        index = torch.randperm(batch_size).cuda()
    else:
        index = torch.randperm(batch_size)

    mixed_x = lam * x + (1 - lam) * x[index, :]
    y_a, y_b = y, y[index]
    return mixed_x, y_a, y_b, lam
Author: jiqiujia | Project: kaggle_camera_id | Lines: 14 | Source: utils.py
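
A short, hypothetical usage sketch for the mixup_data above, following the standard mixup formulation: the loss is blended between the two label sets with the same lam. The names model, criterion, x, and y are placeholders, not part of the original project.

# hypothetical consumption of mixup_data's return values
mixed_x, y_a, y_b, lam = mixup_data(x, y, alpha=1.0, use_cuda=False)
pred = model(mixed_x)
# mixup loss: weight the two label sets by lam and (1 - lam)
loss = lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b)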


Example 6: pretrain

    def pretrain(self, train_data, corrupter, tester):
        src, rel, dst = train_data
        n_train = len(src)
        optimizer = Adam(self.mdl.parameters())
        # optimizer = SGD(self.mdl.parameters(), lr=1e-4)
        n_epoch = self.config.n_epoch
        n_batch = self.config.n_batch
        best_perf = 0
        for epoch in range(n_epoch):
            epoch_loss = 0
            rand_idx = t.randperm(n_train)
            src = src[rand_idx]
            rel = rel[rand_idx]
            dst = dst[rand_idx]
            src_corrupted, dst_corrupted = corrupter.corrupt(src, rel, dst)
            src_cuda = src.cuda()
            rel_cuda = rel.cuda()
            dst_cuda = dst.cuda()
            src_corrupted = src_corrupted.cuda()
            dst_corrupted = dst_corrupted.cuda()
            for s0, r, t0, s1, t1 in batch_by_num(n_batch, src_cuda, rel_cuda, dst_cuda, src_corrupted, dst_corrupted,
                                                  n_sample=n_train):
                self.mdl.zero_grad()
                loss = t.sum(self.mdl.pair_loss(Variable(s0), Variable(r), Variable(t0), Variable(s1), Variable(t1)))
                loss.backward()
                optimizer.step()
                self.mdl.constraint()
                epoch_loss += loss.data[0]
            logging.info('Epoch %d/%d, Loss=%f', epoch + 1, n_epoch, epoch_loss / n_train)
            if (epoch + 1) % self.config.epoch_per_test == 0:
                test_perf = tester()
                if test_perf > best_perf:
                    self.save(os.path.join(config().task.dir, self.config.model_file))
                    best_perf = test_perf
        return best_perf
Author: cai-lw | Project: KBGAN | Lines: 35 | Source: trans_d.py


Example 7: val

def val(spatial_size, Scale, precomputeStride):
    d = pickle.load(open('pickle/test.pickle', 'rb'))
    d = torchnet.dataset.ListDataset(d)
    randperm = torch.randperm(len(d))

    def perm(idx, size):
        return randperm[idx]

    def merge(tbl):
        inp = scn.InputBatch(2, spatial_size)
        center = spatial_size.float().view(1, 2) / 2
        p = torch.LongTensor(2)
        v = torch.FloatTensor([1, 0, 0])
        for char in tbl['input']:
            inp.addSample()
            for stroke in char:
                stroke = stroke.float() * (Scale - 0.01) / 255 - 0.5 * (Scale - 0.01)
                stroke += center.expand_as(stroke)
                scn.dim_fn(
                    2,
                    'drawCurve')(
                    inp.metadata.ffi,
                    inp.features,
                    stroke)
        inp.precomputeMetadata(precomputeStride)
        return {'input': inp, 'target': torch.LongTensor(tbl['target']) - 1}
    bd = torchnet.dataset.BatchDataset(d, 183, perm=perm, merge=merge)
    tdi = scn.threadDatasetIterator(bd)

    def iter():
        # note: this binds a new local randperm; the outer randperm that
        # perm() closes over is not updated by this assignment
        randperm = torch.randperm(len(d))
        return tdi()
    return iter
Author: gaobb | Project: SparseConvNet | Lines: 33 | Source: data.py


Example 8: train_valid_splitter

def train_valid_splitter(x, y, split, shuffle=True):
    ''' Generate training and validation tensors from whole dataset data and label tensors
    
    :param x: Data tensor for whole dataset
    :type x: torch.Tensor
    :param y: Label tensor for whole dataset
    :type y: torch.Tensor
    :param split: Fraction of dataset to be used for validation
    :type split: float
    :param shuffle: If True, randomize tensor order before splitting; otherwise keep the original order
    :type shuffle: bool
    :return: Training and validation tensors (training data, training labels, validation data, validation labels)
    :rtype: tuple
    '''
    num_samples_x = x.size()[0]
    num_valid_samples = math.floor(num_samples_x * split)

    if shuffle:
        indices = torch.randperm(num_samples_x)
        x, y = x[indices], y[indices]

    x_val, y_val = x[:num_valid_samples], y[:num_valid_samples]
    x, y = x[num_valid_samples:], y[num_valid_samples:]

    return x, y, x_val, y_val
Author: little1tow | Project: torchbearer | Lines: 25 | Source: cv_utils.py
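
An illustrative call of the splitter above; the toy tensors and the 20% split are assumptions for demonstration:

import math
import torch

x = torch.randn(100, 10)
y = torch.randint(0, 2, (100,))
x_train, y_train, x_val, y_val = train_valid_splitter(x, y, split=0.2)
# math.floor(100 * 0.2) == 20 samples go to validation
assert x_val.size(0) == 20 and x_train.size(0) == 80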


Example 9: sparse_

def sparse_(tensor, sparsity, std=0.01):
    r"""Fills the 2D input `Tensor` as a sparse matrix, where the
    non-zero elements will be drawn from the normal distribution
    :math:`\mathcal{N}(0, 0.01)`, as described in "Deep learning via
    Hessian-free optimization" - Martens, J. (2010).

    Args:
        tensor: a 2-dimensional `torch.Tensor`
        sparsity: The fraction of elements in each column to be set to zero
        std: the standard deviation of the normal distribution used to generate
            the non-zero values

    Examples:
        >>> w = torch.empty(3, 5)
        >>> nn.init.sparse_(w, sparsity=0.1)
    """
    if tensor.ndimension() != 2:
        raise ValueError("Only tensors with 2 dimensions are supported")

    rows, cols = tensor.shape
    num_zeros = int(math.ceil(sparsity * rows))

    with torch.no_grad():
        tensor.normal_(0, std)
        for col_idx in range(cols):
            row_indices = torch.randperm(rows)
            zero_indices = row_indices[:num_zeros]
            tensor[zero_indices, col_idx] = 0
    return tensor
Author: xiongyw | Project: pytorch | Lines: 29 | Source: init.py


Example 10: __call__

    def __call__(self, *inputs):
        # note: th.randperm is sized by inputs[0].dim() (the number of axes),
        # which only equals the length of dim 0 when the two happen to
        # coincide, e.g. 3 x H x W images permuted along the channel axis
        order = th.randperm(inputs[0].dim())
        outputs = []
        for idx, _input in enumerate(inputs):
            _input = _input.index_select(0, order)
            outputs.append(_input)
        return outputs if len(outputs) > 1 else outputs[0]
Author: BrianDo2005 | Project: torchsample | Lines: 7 | Source: tensor_transforms.py


Example 11: collapse_exp_1

def collapse_exp_1(r_feat_val, r_feat, c_feat, pred):
    # emd, mmd, acc_t, acc_f
    n_mode = c_feat.size(0)
    c_feat_repeat = c_feat[pred]
    scores = np.zeros((n_mode, 4))
    t_feat = r_feat.clone()
    index = torch.arange(0, 2000).long()
    collapsed_order = torch.randperm(n_mode).long()
    Mxx = distance(r_feat_val, r_feat_val, sqrt=False)
    
    for i in range(n_mode):
        # Compute Score
        Mxy = distance(r_feat_val, t_feat, sqrt=False)
        Myy = distance(t_feat, t_feat, sqrt=False)
        scores[i, 0] = wasserstein(Mxy, True)
        scores[i, 1] = mmd(Mxx, Mxy, Myy, 1)
        s = knn(Mxx, Mxy, Myy, 1, True)
        scores[i, 2], scores[i, 3] = s.acc_t, s.acc_f
        
        # Do collapse 
        c = collapsed_order[i]
        cidx = index[pred.eq(c)]
        t_feat[cidx] = c_feat_repeat[cidx]
        
    return scores
Author: RobinROAR | Project: TensorflowTutorialsCode | Lines: 25 | Source: experiments.py


Example 12: pretrain

    def pretrain(self, train_data, corrupter, tester):
        src, rel, dst = train_data
        n_train = len(src)
        n_epoch = self.config.n_epoch
        n_batch = self.config.n_batch
        optimizer = Adam(self.mdl.parameters(), weight_decay=self.weight_decay)
        best_perf = 0
        for epoch in range(n_epoch):
            epoch_loss = 0
            if epoch % self.config.sample_freq == 0:
                rand_idx = t.randperm(n_train)
                src = src[rand_idx]
                rel = rel[rand_idx]
                dst = dst[rand_idx]
                src_corrupted, rel_corrupted, dst_corrupted = corrupter.corrupt(src, rel, dst)
                src_corrupted = src_corrupted.cuda()
                rel_corrupted = rel_corrupted.cuda()
                dst_corrupted = dst_corrupted.cuda()
            for ss, rs, ts in batch_by_num(n_batch, src_corrupted, rel_corrupted, dst_corrupted, n_sample=n_train):
                self.mdl.zero_grad()
                label = t.zeros(len(ss)).type(t.LongTensor).cuda()
                loss = t.sum(self.mdl.softmax_loss(Variable(ss), Variable(rs), Variable(ts), label))
                loss.backward()
                optimizer.step()
                epoch_loss += loss.data[0]
            logging.info('Epoch %d/%d, Loss=%f', epoch + 1, n_epoch, epoch_loss / n_train)
            if (epoch + 1) % self.config.epoch_per_test == 0:
                test_perf = tester()
                if test_perf > best_perf:
                    self.save(os.path.join(config().task.dir, self.config.model_file))
                    best_perf = test_perf
        return best_perf
Author: cai-lw | Project: KBGAN | Lines: 32 | Source: distmult.py


Example 13: _generate_perms_and_inverses

    def _generate_perms_and_inverses(feature_size, num_perms):
        perms = [torch.randperm(feature_size)
                 for _ in range(num_perms)]
        inv_perms = [torch.cat([(perm == i).nonzero()
                                for i in range(feature_size)], 0).squeeze()
                     for perm in perms]
        return perms, inv_perms
Author: jramapuram | Project: memory | Lines: 7 | Source: holographic_memory.py
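
A quick sketch of the invariant this helper provides: composing a permutation with its inverse restores the original order. The feature size below is an arbitrary assumption, and the helper is called as a free function for illustration even though it sits inside a class in the source.

perms, inv_perms = _generate_perms_and_inverses(feature_size=8, num_perms=2)
x = torch.arange(8)
# applying perm and then inv_perm is the identity mapping
assert torch.equal(x[perms[0]][inv_perms[0]], x)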


Example 14: drop_exp_1

def drop_exp_1(r_feat_val, r_feat_train, pred):
    # emd, mmd, acc_t, acc_f
    n_mode = len(Counter(pred))
    scores = np.zeros((n_mode, 4))
    t_feat = r_feat_train.clone()
    collapsed_order = torch.randperm(n_mode).long()
    index = torch.arange(0, r_feat_train.size(0)).long()
    collapsed = torch.zeros(r_feat_train.size(0)).byte()
    Mxx = distance(r_feat_val, r_feat_val, sqrt=True)
    
    for i in range(n_mode):
        # Compute Score
        Mxy = distance(r_feat_val, t_feat, sqrt=True)
        Myy = distance(t_feat, t_feat, sqrt=True)
        scores[i, 0] = wasserstein(Mxy, False)
        scores[i, 1] = mmd(Mxx, Mxy, Myy, 1)
        s = knn(Mxx, Mxy, Myy, 1, True)
        scores[i, 2], scores[i, 3] = s.acc_t, s.acc_f
        
        # Do drop -- fill dropped slots with remaining samples
        c = collapsed_order[i]
        collapsed[pred.eq(c)] = 1
        cidx = index[collapsed.eq(1)]
        ncidx = index[collapsed.ne(1)]
        if ncidx.dim() == 0 or cidx.dim() == 0 or ncidx.size(0) == 0:
            continue
        for j in cidx:
            copy_idx = np.random.randint(0, ncidx.size(0))
            t_feat[j] = t_feat[ncidx[copy_idx]]
            
    return scores
Author: RobinROAR | Project: TensorflowTutorialsCode | Lines: 31 | Source: experiments.py


Example 15: test

    def test(self):
        if opt['model'] == 'CharCNN':
            X_train = self.dataset.df_train['text_parsed'].values
            X_test = self.dataset.df_test['text_parsed'].values
        else:
            X_train = self.dataset.df_train['ids'].values
            X_test = self.dataset.df_test['ids'].values

        Y_train = self.dataset.df_train['label'].values
        Y_test = self.dataset.df_test['label'].values        

        m_train = len(X_train)
        permutation = torch.randperm(m_train)

        accuracies = []
        for start_idx in range(0, m_train, opt['batch_size']):
            indices = permutation[start_idx:start_idx + opt['batch_size']]

            if opt['model'] == 'CharCNN':
                X_train_batch, X_train_mask_batch, Y_train_batch = self.create_batch_char(X_train, Y_train, indices)
            else:
                X_train_batch, X_train_mask_batch, Y_train_batch = self.create_batch(X_train, Y_train, indices)
            Y_predict = self.model(X_train_batch, X_train_mask_batch)
            loss = self.loss(Y_predict, Y_train_batch)

            accuracy, _ = self.calculate_accuracy(Y_train_batch, Y_predict)
            accuracies.append(accuracy)
            print(loss.cpu().data.numpy(), accuracy)

            del X_train_batch, X_train_mask_batch, Y_train_batch, Y_predict

        print(sum(accuracies)/len(accuracies))
Author: mircean | Project: ML | Lines: 32 | Source: trainer.py


Example 16: optimize_model

def optimize_model(model, x, y, x_test, y_test, batch_size=32, learning_rate=1e-4, weight_decay=1e-4):
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=weight_decay)
    N = y.size(0)
    num_one_epoch = np.floor(N / batch_size).astype(np.int)
    num_epoch = np.floor(3000/num_one_epoch).astype(np.int)
    for epoch in range(num_epoch):
        index = torch.randperm(N)
        for t in range(num_one_epoch):
            idx_start = t*batch_size
            idx_end = (t+1)*batch_size
            y_pred = model(x[index[idx_start:idx_end], :])
            loss = torch.nn.MSELoss()(y_pred, y[index[idx_start:idx_end]])
            # print(epoch, t, loss.data[0])

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

    y_pred = model(x)
    loss = torch.nn.MSELoss()(y_pred, y)
    y_test_pred = model(x_test)
    test_loss = torch.nn.MSELoss()(y_test_pred, y_test)
    # print(test_loss.data[0])
    print(loss.data[0], test_loss.data[0])
    return loss.data[0], test_loss.data[0]
Author: yangyi02 | Project: my-scripts | Lines: 25 | Source: few_shot_regression.py


Example 17: drop_exp_2

def drop_exp_2(r_feat_val, r_feat_train, pred):
    # incep_score, mode_score, fid
    n_mode = len(Counter(pred))
    scores = np.zeros((n_mode, 3))
    t_feat = r_feat_train.clone()
    collapsed_order = torch.randperm(n_mode).long()
    index = torch.arange(0, r_feat_train.size(0)).long()
    collapsed = torch.zeros(r_feat_train.size(0)).byte()
    Mxx = distance(r_feat_val, r_feat_val, sqrt=True)
    
    for i in range(n_mode):
        # Compute Score
        Mxy = distance(r_feat_val, t_feat, sqrt=True)
        Myy = distance(t_feat, t_feat, sqrt=True)
        scores[i, 0] = inception_score(t_feat)
        scores[i, 1] = mode_score(t_feat, r_feat_val)
        scores[i, 2] = fid(t_feat, r_feat_val)
        
        # Do drop -- fill dropped slots with remaining samples
        c = collapsed_order[i]
        collapsed[pred.eq(c)] = 1
        cidx = index[collapsed.eq(1)]
        ncidx = index[collapsed.ne(1)]
        if ncidx.dim() == 0 or cidx.dim() == 0 or ncidx.size(0) == 0:
            continue
        for j in cidx:
            copy_idx = np.random.randint(0, ncidx.size(0))
            t_feat[j] = t_feat[ncidx[copy_idx]]
            
    return scores
Author: RobinROAR | Project: TensorflowTutorialsCode | Lines: 30 | Source: experiments.py


Example 18: _shuffle_training_data

    def _shuffle_training_data(self):
        """
        Shuffles the training data.

        :return: None
        """
        num_examples = len(self.train_x)
        shuffled_indices = torch.randperm(num_examples)
        self.train_x = self.train_x[shuffled_indices]
        self.train_y = self.train_y[shuffled_indices]
Author: simonjmendelsohn | Project: StackNN | Lines: 10 | Source: base.py


Example 19: __iter__

    def __iter__(self):
        rand_num = torch.randperm(self.num_per_batch).view(-1, 1) * self.batch_size
        self.rand_num = rand_num.expand(self.num_per_batch, self.batch_size) + self.range

        self.rand_num_view = self.rand_num.view(-1)

        if self.leftover_flag:
            self.rand_num_view = torch.cat((self.rand_num_view, self.leftover),0)

        return iter(self.rand_num_view)
Author: lianDaniel | Project: R-FCN.pytorch | Lines: 10 | Source: trainval_net.py


Example 20: __call__

    def __call__(self, data, subsample=True):

        # deterministically shuffle based on epoch
        g = torch.Generator()
        g.manual_seed(self.epoch)

        indices = list(torch.randperm(len(data), generator=g))
        if not subsample:
            return [data[i] for i in indices]
        return [data[i] for i in self.subsample(indices)]
Author: AhlamMD | Project: decaNLP | Lines: 10 | Source: iterator.py



Note: The torch.randperm examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective authors, and copyright remains with them; consult each project's license before redistributing or reusing the code. Do not repost without permission.

