
Python torch.mean Function Code Examples


This article collects typical usage examples of the torch.mean function from Python's torch package. If you have been wondering what torch.mean does, how to call it, or what real-world usage looks like, the curated examples below should help.



Below are 20 code examples of torch.mean, drawn from open-source projects and ordered roughly by popularity.
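As a quick orientation before the project excerpts, here is a minimal standalone sketch of the common call patterns (full reduction, reduction along a dimension, and keepdim); the tensor values are made up for illustration:

import torch

x = torch.tensor([[1.0, 2.0, 3.0],
                  [4.0, 5.0, 6.0]])  # shape [2, 3]

print(torch.mean(x))                        # mean over all elements -> tensor(3.5000)
print(torch.mean(x, dim=0))                 # column means -> tensor([2.5000, 3.5000, 4.5000])
print(torch.mean(x, dim=1))                 # row means -> tensor([2., 5.])
print(torch.mean(x, dim=1, keepdim=True))   # keeps the reduced dim -> shape [2, 1]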

Example 1: train_step

    def train_step(self, state_batch, mcts_probs, winner_batch, lr):
        """perform a training step"""
        # wrap in Variable (deprecated since PyTorch 0.4; plain tensors work directly)
        if self.use_gpu:
            state_batch = Variable(torch.FloatTensor(state_batch).cuda())
            mcts_probs = Variable(torch.FloatTensor(mcts_probs).cuda())
            winner_batch = Variable(torch.FloatTensor(winner_batch).cuda())
        else:
            state_batch = Variable(torch.FloatTensor(state_batch))
            mcts_probs = Variable(torch.FloatTensor(mcts_probs))
            winner_batch = Variable(torch.FloatTensor(winner_batch))

        # zero the parameter gradients
        self.optimizer.zero_grad()
        # set learning rate
        set_learning_rate(self.optimizer, lr)

        # forward
        log_act_probs, value = self.policy_value_net(state_batch)
        # define the loss = (z - v)^2 - pi^T * log(p) + c||theta||^2
        # Note: the L2 penalty is incorporated in optimizer
        value_loss = F.mse_loss(value.view(-1), winner_batch)
        policy_loss = -torch.mean(torch.sum(mcts_probs*log_act_probs, 1))
        loss = value_loss + policy_loss
        # backward and optimize
        loss.backward()
        self.optimizer.step()
        # calc policy entropy, for monitoring only
        entropy = -torch.mean(
                torch.sum(torch.exp(log_act_probs) * log_act_probs, 1)
                )
        return loss.item(), entropy.item()  # .data[0] was the pre-0.4 idiom; use .item() on modern PyTorch
Developer: Vendeloeranu | Project: AlphaZero_Gomoku | Source: policy_value_net_pytorch.py
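The loss above combines three pieces; the torch.mean calls collapse per-sample quantities to batch scalars. A minimal sketch of just the policy-loss and entropy patterns, with made-up tensors (B and A are hypothetical sizes):

import torch

B, A = 4, 8                                                   # hypothetical batch size and action count
mcts_probs = torch.softmax(torch.randn(B, A), dim=1)          # target distribution per sample
log_act_probs = torch.log_softmax(torch.randn(B, A), dim=1)   # network's log-probabilities

# per-sample cross-entropy, then mean over the batch
policy_loss = -torch.mean(torch.sum(mcts_probs * log_act_probs, dim=1))

# entropy of the predicted policy, for monitoring only
entropy = -torch.mean(torch.sum(torch.exp(log_act_probs) * log_act_probs, dim=1))
print(policy_loss.item(), entropy.item())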


Example 2: forward

    def forward(self, x, k):

        self.B = x.size()[0] #batch size

        #Encode
        mu, logvar = self.encode(x)  #[B,Z]
        z, logpz, logqz = self.sample(mu, logvar, k=k) #[P,B,Z], [P,B]

        #Decode
        x_hat = self.decode(z) #[P,B,X]
        logpx = log_bernoulli(x_hat, x)  #[P,B]

        #Compute elbo
        elbo = logpx + logpz - logqz #[P,B]
        if k>1:
            max_ = torch.max(elbo, 0)[0] #[B]
            elbo = torch.log(torch.mean(torch.exp(elbo - max_), 0)) + max_ #[B]
            
        elbo = torch.mean(elbo) #[1]
        logpx = torch.mean(logpx)
        logpz = torch.mean(logpz)
        logqz = torch.mean(logqz)


        return elbo, logpx, logpz, logqz
Developer: chriscremer | Project: Other_Code | Source: pytorch_vae3.py
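The k>1 branch is a numerically stable log-mean-exp over the particle dimension (the importance-weighted bound). The same trick as a standalone helper; the function name is hypothetical, not from the source project:

import torch

def log_mean_exp(x, dim=0):
    # stable log(mean(exp(x))) along dim: subtract the max before exponentiating
    max_ = torch.max(x, dim, keepdim=True)[0]
    return (max_ + torch.log(torch.mean(torch.exp(x - max_), dim, keepdim=True))).squeeze(dim)

elbo = torch.randn(5, 16)                # [P, B]: one estimate per particle and sample
iwae_bound = log_mean_exp(elbo, dim=0)   # [B], matches the k>1 branch above
print(torch.mean(iwae_bound))            # then averaged over the batch, as in the example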


Example 3: setUp

    def setUp(self, length=3, factor=10, count=1000000,
              seed=None, dtype=torch.float64, device=None):
        '''Set up the test values.

        Args:
            length: Size of the vector.
            factor: To multiply the mean and standard deviation.
            count: Number of samples for Monte-Carlo estimation.
            seed: Seed for the random number generator.
            dtype: The data type.
            device: Device on which to allocate the tensors.
        '''
        if seed is not None:
            torch.manual_seed(seed)

        # input mean and covariance
        self.mu = torch.randn(length, dtype=dtype, device=device) * factor
        self.cov = rand.definite(length, dtype=dtype, device=device,
                                 positive=True, semi=False, norm=factor**2)
        self.var = self.cov.diag()

        # Monte-Carlo estimation of the output mean and variance
        normal = torch.distributions.MultivariateNormal(self.mu, self.cov)
        out_samples = normal.sample((count,)).clamp_(min=0.0)
        self.mc_mu = torch.mean(out_samples, dim=0)
        self.mc_var = torch.var(out_samples, dim=0)
        normal = torch.distributions.MultivariateNormal(self.mu * 0, self.cov)
        out_samples = normal.sample((count,)).clamp_(min=0.0)
        mean = torch.mean(out_samples, dim=0)
        self.mc_zm_cov = cov(out_samples)
        self.mc_zm_corr = self.mc_zm_cov + outer(mean)
Developer: ModarTensai | Project: network_moments | Source: tests.py
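One detail worth noting alongside the torch.mean calls above: torch.var defaults to the unbiased (Bessel-corrected) estimator, which is appropriate for Monte-Carlo estimates like these. A small self-contained check with made-up parameters:

import torch

samples = torch.randn(1_000_000, 3) * 2.0 + 5.0   # hypothetical samples, mean 5, std 2
mu_hat = torch.mean(samples, dim=0)               # per-column sample mean, ~5.0
var_hat = torch.var(samples, dim=0)               # unbiased by default, ~4.0
print(mu_hat, var_hat)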


Example 4: forward

    def forward(self, x, k=1):
        self.k = k
        self.B = x.size()[0]
        mu, logvar = self.encode(x)
        z, logpz, logqz = self.sample(mu, logvar, k=k)
        x_hat, logpW, logqW = self.decode(z)

        logpx = log_bernoulli(x_hat, x)  #[P,B]


        elbo = logpx + logpz - logqz + (logpW - logqW)*.00000001  #[P,B]

        if k>1:
            max_ = torch.max(elbo, 0)[0] #[B]
            elbo = torch.log(torch.mean(torch.exp(elbo - max_), 0)) + max_ #[B]

        elbo = torch.mean(elbo) #[1]

        #for printing
        logpx = torch.mean(logpx)
        logpz = torch.mean(logpz)
        logqz = torch.mean(logqz)
        self.x_hat_sigmoid = F.sigmoid(x_hat)

        return elbo, logpx, logpz, logqz, logpW, logqW
Developer: chriscremer | Project: Other_Code | Source: bvae_pytorch.py


Example 5: calculate_loss

    def calculate_loss(self, x, beta=1., average=False):
        '''
        :param x: input image(s)
        :param beta: a hyperparam for warmup
        :param average: whether to average loss or not
        :return: value of a loss function
        '''
        # pass through VAE
        x_mean, x_logvar, z_q, z_q_mean, z_q_logvar = self.forward(x)

        # RE
        if self.args.input_type == 'binary':
            RE = log_Bernoulli(x, x_mean, dim=1)
        elif self.args.input_type == 'gray' or self.args.input_type == 'continuous':
            RE = -log_Logistic_256(x, x_mean, x_logvar, dim=1)
        else:
            raise Exception('Wrong input type!')

        # KL
        log_p_z = self.log_p_z(z_q)
        log_q_z = log_Normal_diag(z_q, z_q_mean, z_q_logvar, dim=1)
        KL = -(log_p_z - log_q_z)

        loss = - RE + beta * KL

        if average:
            loss = torch.mean(loss)
            RE = torch.mean(RE)
            KL = torch.mean(KL)

        return loss, RE, KL
Developer: jramapuram | Project: vae_vampprior | Source: VAE.py


Example 6: evaluate

def evaluate(model):
    model.eval()
    running_loss = [0., 0., 0.]
    epoch_loss = 0.
    display_step = 100
    for batch_idx, (x, c) in enumerate(test_loader):
        x, c = x.to(device), c.to(device)
        log_p, logdet = model(x, c)
        log_p, logdet = torch.mean(log_p), torch.mean(logdet)
        loss = -(log_p + logdet)

        running_loss[0] += loss.item() / display_step
        running_loss[1] += log_p.item() / display_step
        running_loss[2] += logdet.item() / display_step
        epoch_loss += loss.item()

        if (batch_idx + 1) % display_step == 0:
            # global_step and epoch are module-level globals in the source project
            print('Global Step : {}, [{}, {}] [Log pdf, Log p(z), Log Det] : {}'
                  .format(global_step, epoch, batch_idx + 1, np.array(running_loss)))
            running_loss = [0., 0., 0.]
        del x, c, log_p, logdet, loss
    del running_loss
    epoch_loss /= len(test_loader)
    print('Evaluation Loss : {:.4f}'.format(epoch_loss))
    return epoch_loss
Developer: afd77 | Project: FloWaveNet | Source: train_apex.py


Example 7: forward

    def forward(self, frame, policy):
        # frame: [B,6,210,160]
        self.B = frame.size()[0]

        # Predict a 2-channel mask and tile it to cover the 6 frame channels
        mask = self.predict_mask(frame)  #[B,2,210,160]
        mask = mask.repeat(1,3,1,1)  #[B,6,210,160]
        masked_frame = frame * mask

        log_dist_mask = policy.action_logdist(masked_frame)
        log_dist_true = policy.action_logdist(frame)

        action_dist_kl = torch.sum((log_dist_true - log_dist_mask)*torch.exp(log_dist_true), dim=1) #[B]
        action_dist_kl = torch.mean(action_dist_kl) # * 1000

        mask = mask.view(self.B, -1)
        mask_sum = torch.mean(torch.sum(mask, dim=1)) * .000001

        loss = action_dist_kl + mask_sum

        return loss, action_dist_kl, mask_sum
Developer: chriscremer | Project: Other_Code | Source: train_mask_net.py


Example 8: forward

    def forward(self, frame, DQNs):
        # frame: [B,3,480,640]
        self.B = frame.size()[0]

        blurred_frame = self.blur_frame(frame)

        #Predict mask
        blur_weighting = self.predict_precision(frame)  #[B,1,480,640]
        blur_weighting = blur_weighting.repeat(1,3,1,1)

        mixed_frame = frame * blur_weighting + (1.-blur_weighting)*blurred_frame


        difs = []
        for i in range(len(DQNs)):
            q_mask = DQNs[i](mixed_frame)
            q_real = DQNs[i](frame)

            dif = torch.mean((q_mask - q_real)**2)  # scalar: mean over batch and actions
            difs.append(dif)

        difs = torch.stack(difs)
        dif = torch.mean(difs)


        blur_weighting = blur_weighting.view(self.B, -1)
        mask_sum = torch.mean(torch.sum(blur_weighting, dim=1)) * .0000001

        loss = dif + mask_sum

        return loss, dif, mask_sum
Developer: chriscremer | Project: Other_Code | Source: vae_doom_withblur.py


Example 9: prepare_model

def prepare_model():
    since = time.time()

    num_epochs = 1
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)

        # Each epoch has a training and validation phase
        for phase in ['train']:

            mean = torch.zeros(3)
            std = torch.zeros(3)
            # Iterate over data.
            for data in dataloaders[phase]:
                # get the inputs
                inputs, labels = data
                now_batch_size,c,h,w = inputs.shape
                mean += torch.sum(torch.mean(torch.mean(inputs,dim=3),dim=2),dim=0)
                std += torch.sum(torch.std(inputs.view(now_batch_size,c,h*w),dim=2),dim=0)
                
            print(mean/dataset_sizes['train'])
            print(std/dataset_sizes['train'])

    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    return 
Developer: AITTSMD | Project: Person_reID_baseline_pytorch | Source: prepare_static.py
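The nested torch.mean calls above reduce H and W one dimension at a time. On reasonably recent PyTorch, mean also accepts a tuple of dimensions, so the per-channel statistic can be taken in one call; a hedged equivalent with a made-up batch:

import torch

inputs = torch.rand(32, 3, 224, 224)                           # hypothetical [B, C, H, W] batch

per_image_mean = torch.mean(torch.mean(inputs, dim=3), dim=2)  # the example's chained form, [B, C]
per_channel_mean = torch.mean(inputs, dim=(0, 2, 3))           # one call over batch and spatial dims, [C]

print(torch.allclose(per_channel_mean, per_image_mean.mean(dim=0)))  # True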


Example 10: forward

    def forward(self, x, k=1):
        
        self.B = x.size()[0]
        mu, logvar = self.encode(x)
        z, logpz, logqz = self.sample(mu, logvar, k=k)  #[P,B,Z]
        x_hat = self.decode(z)  #[PB,X]
        x_hat = x_hat.view(k, self.B, -1)  #[P,B,X]
        logpx = log_bernoulli(x_hat, x)  #[P,B]

        elbo = logpx + logpz - logqz  #[P,B]

        if k>1:
            max_ = torch.max(elbo, 0)[0] #[B]
            elbo = torch.log(torch.mean(torch.exp(elbo - max_), 0)) + max_ #[B]

        elbo = torch.mean(elbo) #[1]

        #for printing
        logpx = torch.mean(logpx)
        logpz = torch.mean(logpz)
        logqz = torch.mean(logqz)
        self.x_hat_sigmoid = F.sigmoid(x_hat)

        return elbo, logpx, logpz, logqz
Developer: chriscremer | Project: Other_Code | Source: vae_deconv.py


Example 11: get_paf_and_heatmap

def get_paf_and_heatmap(model, img_raw, scale_search, param_stride=8, box_size=368):
    multiplier = [scale * box_size / img_raw.shape[0] for scale in scale_search]

    heatmap_avg = torch.zeros((len(multiplier), 19, img_raw.shape[0], img_raw.shape[1])).cuda()
    paf_avg = torch.zeros((len(multiplier), 38, img_raw.shape[0], img_raw.shape[1])).cuda()

    for i, scale in enumerate(multiplier):
        img_test = cv2.resize(img_raw, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
        img_test_pad, pad = pad_right_down_corner(img_test, param_stride, param_stride)
        img_test_pad = np.transpose(np.float32(img_test_pad[:, :, :, np.newaxis]), (3, 2, 0, 1)) / 256 - 0.5

        feed = Variable(torch.from_numpy(img_test_pad)).cuda()
        output1, output2 = model(feed)

        print(output1.size())
        print(output2.size())

        heatmap = nn.UpsamplingBilinear2d((img_raw.shape[0], img_raw.shape[1])).cuda()(output2)

        paf = nn.UpsamplingBilinear2d((img_raw.shape[0], img_raw.shape[1])).cuda()(output1)

        heatmap_avg[i] = heatmap[0].data
        paf_avg[i] = paf[0].data

    heatmap_avg = torch.transpose(torch.transpose(torch.squeeze(torch.mean(heatmap_avg, 0)), 0, 1), 1, 2).cuda()
    heatmap_avg = heatmap_avg.cpu().numpy()

    paf_avg = torch.transpose(torch.transpose(torch.squeeze(torch.mean(paf_avg, 0)), 0, 1), 1, 2).cuda()
    paf_avg = paf_avg.cpu().numpy()

    return paf_avg, heatmap_avg
Developer: codealphago | Project: pytorch-pose-estimation | Source: pose_estimation.py
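The chained torch.transpose calls after the scale-average are a CHW-to-HWC reordering; permute does the same in one step. A hedged equivalence check with made-up sizes:

import torch

heatmap_avg = torch.rand(4, 19, 368, 368)                    # hypothetical [scales, C, H, W]
avg = torch.squeeze(torch.mean(heatmap_avg, 0))              # [C, H, W]
chained = torch.transpose(torch.transpose(avg, 0, 1), 1, 2)  # the example's form
permuted = avg.permute(1, 2, 0)                              # [H, W, C] in one call
print(torch.equal(chained, permuted))                        # True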


Example 12: angle_length_loss

    def angle_length_loss(y_pred, y_true, weights):
        y_true = y_true.permute(0, 2, 3, 1)
        y_pred = y_pred.permute(0, 2, 3, 1)
        weights = weights.permute(0, 2, 3, 1)

        # Single threshold

        # score_per_bundle = {}
        # bundles = ExpUtils.get_bundle_names(HP.CLASSES)[1:]

        nr_of_classes = int(y_true.shape[-1] / 3.)
        scores = torch.zeros(nr_of_classes)

        for idx in range(nr_of_classes):
            y_pred_bund = y_pred[:, :, :, (idx * 3):(idx * 3) + 3].contiguous()
            y_true_bund = y_true[:, :, :, (idx * 3):(idx * 3) + 3].contiguous()  # [x,y,z,3]
            weights_bund = weights[:, :, :, (idx * 3)].contiguous()  # [x,y,z]

            angles = PytorchUtils.angle_last_dim(y_pred_bund, y_true_bund)
            angles_weighted = angles / weights_bund
            #norm lengths to 0-1 to be more equal to angles?? -> peaks are already around 1 -> ok
            lengths = (torch.norm(y_pred_bund, 2, -1) - torch.norm(y_true_bund, 2, -1)) ** 2
            lengths_weighted = lengths * weights_bund

            # Divide by weights.max otherwise lengths would be way bigger
            # Flip angles to make it a minimization problem
            combined = -angles_weighted + lengths_weighted / weights_bund.max()

            scores[idx] = torch.mean(combined)

        return torch.mean(scores)
Developer: doctoryfx | Project: TractSeg | Source: PytorchUtils.py


Example 13: forward

    def forward(self, frame, policies):
        # frame: [B,C,H,W]
        self.B = frame.size()[0]
        

        #Predict mask
        pre_mask = self.predict_mask_nosigmoid(frame)
        mask = F.sigmoid(pre_mask)

        masked_frame = frame * mask
        kls = []
        for i in range(len(policies)):
            policy = policies[i]

            log_dist_mask = policy.action_logdist(masked_frame)
            log_dist_true = policy.action_logdist(frame)

            action_dist_kl = torch.sum((log_dist_true - log_dist_mask)*torch.exp(log_dist_true), dim=1) #[B]
            action_dist_kl = torch.mean(action_dist_kl) # * 1000
            kls.append(action_dist_kl)

        kls = torch.stack(kls)  # [num_policies]
        action_dist_kl = torch.mean(kls)  # scalar: mean over batch and over policies

        pre_mask = pre_mask.view(self.B, -1)
        mask_cost = torch.abs(pre_mask + 20)
        # mask_sum = torch.mean(torch.sum(mask_cost, dim=1)) * .00001
        # mask_cost = torch.mean(mask_cost) * .00001
        mask_cost = torch.mean(mask_cost) * .01

        loss = action_dist_kl + mask_cost

        return loss, action_dist_kl, mask_cost
Developer: chriscremer | Project: Other_Code | Source: learn_to_mask_amortized_vae_policies.py


Example 14: encode_and_logprob

    def encode_and_logprob(self, x):

        for i in range(len(self.first_half_weights)-1):
            x = self.act_func(self.first_half_weights[i](x))

        mean = self.first_half_weights[-1](x)
        logvar = self.q_logvar(x)

        # Sample with the reparameterization trick
        eps = Variable(torch.randn(1, self.z_size))
        x = (torch.exp(.5*logvar) * eps) + mean

        logq = -torch.mean(  logvar.sum(1) + ((x - mean).pow(2)/torch.exp(logvar)).sum(1))
        logp = torch.mean( x.pow(2).sum(1))


        return x, logq+logp
Developer: chriscremer | Project: Other_Code | Source: bottleneck_BNN_q.py
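The sampling line above is the reparameterization trick, and the two torch.mean calls turn per-sample log-densities into scalars (logq tracks the Gaussian log-density of the sample under q only up to constant factors and offsets). A minimal sketch of the trick on its own, with hypothetical sizes and names:

import torch

mean = torch.randn(1, 20, requires_grad=True)
logvar = torch.zeros(1, 20, requires_grad=True)
eps = torch.randn(1, 20)                  # the noise carries no parameters
z = torch.exp(0.5 * logvar) * eps + mean  # z ~ N(mean, exp(logvar)), differentiable in mean/logvar

logq = -torch.mean(logvar.sum(1) + ((z - mean).pow(2) / torch.exp(logvar)).sum(1))
print(z.shape, logq.item())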


Example 15: singleTagLoss

def singleTagLoss(pred_tag, keypoints):
    """
    associative embedding loss for one image
    """
    eps = 1e-6
    tags = []
    pull = 0
    for i in keypoints:
        tmp = []
        for j in i:
            if j[1]>0:
                tmp.append(pred_tag[j[0]])
        if len(tmp) == 0:
            continue
        tmp = torch.stack(tmp)
        tags.append(torch.mean(tmp, dim=0))
        pull = pull +  torch.mean((tmp - tags[-1].expand_as(tmp))**2)

    if len(tags) == 0:
        return make_input(torch.zeros([1]).float()), make_input(torch.zeros([1]).float())

    tags = torch.stack(tags)[:,0]

    num = tags.size()[0]
    size = (num, num, tags.size()[1])
    A = tags.unsqueeze(dim=1).expand(*size)
    B = A.permute(1, 0, 2)

    diff = A - B
    diff = torch.pow(diff, 2).sum(dim=2)[:,:,0]
    push = torch.exp(-diff)
    push = (torch.sum(push) - num)
    return push/((num - 1) * num + eps) * 0.5, pull/(num + eps)
Developer: cuizy15 | Project: pose-ae-train | Source: loss.py


Example 16: forward

    def forward(self, x, k=1):
        
        self.B = x.size()[0]
        mu, logvar = self.encode(x)
        z, logpz, logqz = self.sample(mu, logvar, k=k)
        # Gaussian decoder: returns mean and log-variance rather than Bernoulli logits
        x_mean, x_logvar = self.decode(z)  #[P,B,1]
        logpx = lognormal_decoder(x, x_mean, x_logvar)  #[P,B]

        elbo = logpx + logpz - logqz  #[P,B]

        if k>1:
            max_ = torch.max(elbo, 0)[0] #[B]
            elbo = torch.log(torch.mean(torch.exp(elbo - max_), 0)) + max_ #[B]

        elbo = torch.mean(elbo) #[1]

        # scalar summaries for printing
        logpx = torch.mean(logpx)
        logpz = torch.mean(logpz)
        logqz = torch.mean(logqz)

        return elbo, logpx, logpz, logqz
Developer: chriscremer | Project: Other_Code | Source: iwae.py


Example 17: global_pooling

def global_pooling(x):
	# input x [n, c, h, w]
	# output l [n, c]
	s = torch.mean(x, dim=-1)
	s = torch.mean(s, dim=-1)

	return s
Developer: Betterthinking | Project: CliqueNet-pytorch | Source: net.py
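The two chained reductions compute global average pooling. On recent PyTorch the same result comes from a single call with a tuple of dims; a quick check:

import torch

x = torch.rand(8, 64, 7, 7)                      # [n, c, h, w]
s1 = torch.mean(torch.mean(x, dim=-1), dim=-1)   # the example's chained form, [n, c]
s2 = torch.mean(x, dim=(-2, -1))                 # one call over both spatial dims
print(torch.allclose(s1, s2))                    # True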


Example 18: calculate_loss

    def calculate_loss(self, x, beta=1., average=False):
        # pass through VAE
        x_mean, x_logvar, z1_q, z1_q_mean, z1_q_logvar, z2_q, z2_q_mean, z2_q_logvar, z1_p_mean, z1_p_logvar = self.forward(x)

        # RE
        if self.args.input_type == 'binary':
            RE = log_Bernoulli(x, x_mean, dim=1)
        elif self.args.input_type == 'gray' or self.args.input_type == 'continuous':
            RE = -log_Logistic_256(x, x_mean, x_logvar, dim=1)
        else:
            raise Exception('Wrong input type!')

        # KL
        log_p_z1 = log_Normal_diag(z1_q, z1_p_mean, z1_p_logvar, dim=1)
        log_q_z1 = log_Normal_diag(z1_q, z1_q_mean, z1_q_logvar, dim=1)
        log_p_z2 = self.log_p_z2(z2_q)
        log_q_z2 = log_Normal_diag(z2_q, z2_q_mean, z2_q_logvar, dim=1)
        KL = -(log_p_z1 + log_p_z2 - log_q_z1 - log_q_z2)

        # full loss
        loss = -RE + beta * KL

        if average:
            loss = torch.mean(loss)
            RE = torch.mean(RE)
            KL = torch.mean(KL)

        return loss, RE, KL
Developer: jramapuram | Project: vae_vampprior | Source: convHVAE_2level.py


Example 19: forward

    def forward(self, z_seq, a_seq, term_seq):
        # z_seq: latent states, a_seq: actions, term_seq: termination flags, one per timestep

        h = torch.zeros(1,self.h_size).cuda()
        z_losses = []
        term_losses = []
        for t in range(len(term_seq)-1):

            inter = self.encode_az(a_seq[t], z_seq[t])
            h = self.update_h(h, inter)
            z_pred, term_pred = self.predict_output(h, inter)

            z_loss = torch.mean((z_seq[t+1] - z_pred)**2)
            term_loss = F.binary_cross_entropy_with_logits(input=term_pred, target=term_seq[t+1])

            z_losses.append(z_loss)
            term_losses.append(term_loss)

        z_loss = torch.mean(torch.stack(z_losses))
        term_loss = torch.mean(torch.stack(term_losses)) 

        loss = z_loss + term_loss 

        return loss, z_loss, term_loss
Developer: chriscremer | Project: Other_Code | Source: train_rnn.py
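Example 19 shows the standard recipe for averaging a Python list of scalar losses: torch.mean does not accept a list, so the per-step losses are first stacked into one tensor. In isolation, with made-up values:

import torch

losses = [torch.tensor(0.5), torch.tensor(1.5), torch.tensor(1.0)]  # per-step losses
avg = torch.mean(torch.stack(losses))   # stack -> shape [T], then reduce to a scalar
print(avg)                              # tensor(1.)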


Example 20: forward

    def forward(self, frame, DQNs):
        # frame: [B,6,210,160]
        self.B = frame.size()[0]

        #Predict mask
        mask = self.predict_mask(frame)  #[B,2,210,160]
        mask = mask.repeat(1,3,1,1)  # tile to match the 6-channel frame
        masked_frame = frame * mask

        difs = []
        for i in range(len(DQNs)):
            q_mask = DQNs[i](masked_frame)
            q_real = DQNs[i](frame)
            dif = torch.mean((q_mask - q_real)**2)  # scalar: mean over batch and actions
            difs.append(dif)

        difs = torch.stack(difs)
        dif = torch.mean(difs)


        mask = mask.view(self.B, -1)
        mask_sum = torch.mean(torch.sum(mask, dim=1)) * .0000001

        loss = dif + mask_sum

        return loss, dif, mask_sum
Developer: chriscremer | Project: Other_Code | Source: learn_mask_multipleDQN.py



Note: The torch.mean examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors. When redistributing or reusing the code, please follow the corresponding project's license. Do not repost without permission.

