本文整理汇总了Python中torch.autograd.Variable类的典型用法代码示例。如果您正苦于以下问题:Python Variable类的具体用法?Python Variable怎么用?Python Variable使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了Variable类的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: train
def train(ep):
    """Run one training epoch over X_train, logging average loss periodically.

    Relies on module-level `model`, `optimizer`, `args`, `X_train`, `lr`.

    :param ep: epoch number (used only for logging)
    """
    model.train()
    total_loss = 0
    count = 0
    train_idx_list = np.arange(len(X_train), dtype="int32")
    np.random.shuffle(train_idx_list)
    for idx in train_idx_list:
        data_line = X_train[idx]
        # Next-step prediction: input is the sequence minus its last step,
        # target is the sequence shifted by one.
        x, y = Variable(data_line[:-1]), Variable(data_line[1:])
        if args.cuda:
            x, y = x.cuda(), y.cuda()
        optimizer.zero_grad()
        output = model(x.unsqueeze(0)).squeeze(0)
        # Binary cross-entropy summed over the sequence; trace() picks out
        # the per-timestep dot products of target rows with log-prob rows.
        loss = -torch.trace(torch.matmul(y, torch.log(output).float().t()) +
                            torch.matmul((1 - y), torch.log(1 - output).float().t()))
        total_loss += loss.data[0]
        count += output.size(0)
        # BUG FIX: gradients must exist before they can be clipped.  The
        # original called clip_grad_norm BEFORE backward(), which made the
        # clip a no-op on absent/stale gradients.
        loss.backward()
        if args.clip > 0:
            torch.nn.utils.clip_grad_norm(model.parameters(), args.clip)
        optimizer.step()
        if idx > 0 and idx % args.log_interval == 0:
            cur_loss = total_loss / count
            print("Epoch {:2d} | lr {:.5f} | loss {:.5f}".format(ep, lr, cur_loss))
            total_loss = 0.0
            count = 0
开发者ID:wasaCheney,项目名称:TCN,代码行数:28,代码来源:music_test.py
示例2: predict
def predict(self, dataset):
    """Predict target for dataset.

    Parameters:
    ----------
    dataset (dict): dictionary with the testing dataset -
    X_wide_test, X_deep_test, target

    Returns:
    --------
    array-like with the target for dataset
    """
    wide_input = Variable(torch.from_numpy(dataset.wide)).float()
    deep_input = Variable(torch.from_numpy(dataset.deep))
    if use_cuda:
        wide_input, deep_input = wide_input.cuda(), deep_input.cuda()
    # Evaluation mode so dropout is disabled during inference.
    net = self.eval()
    pred = net(wide_input, deep_input).cpu()
    if self.method == "regression":
        result = pred.squeeze(1).data.numpy()
    elif self.method == "logistic":
        # Threshold sigmoid outputs at 0.5 to get hard class labels.
        result = (pred > 0.5).squeeze(1).data.numpy()
    elif self.method == "multiclass":
        _, pred_cat = torch.max(pred, 1)
        result = pred_cat.data.numpy()
    else:
        # Unknown method: match the original's implicit behaviour.
        result = None
    return result
开发者ID:KyrieChin,项目名称:Wide-and-Deep-PyTorch,代码行数:29,代码来源:torch_model.py
示例3: forward
def forward(self, k, x, logposterior):
    '''
    Encode x to a Gaussian q(z|x), draw k samples via the
    reparameterization trick, then refine them through n_flows
    normalizing-flow steps.

    k: number of samples
    x: [B,X]
    logposterior(z) -> [P,B]

    Returns (z, logqz - logdetsum): the flowed samples and their
    log-density under the flow-transformed q.
    '''
    self.B = x.size()[0]
    self.P = k
    # Encode: MLP with activations on all but the last layer, which
    # outputs mean and logvar concatenated along the feature dim.
    out = x
    for i in range(len(self.encoder_weights)-1):
        out = self.act_func(self.encoder_weights[i](out))
    out = self.encoder_weights[-1](out)
    mean = out[:,:self.z_size]
    logvar = out[:,self.z_size:]
    # Sample: z = mean + eps * std (reparameterization trick).
    eps = Variable(torch.FloatTensor(k, self.B, self.z_size).normal_().type(self.dtype)) #[P,B,Z]
    z = eps.mul(torch.exp(.5*logvar)) + mean #[P,B,Z]
    logqz = lognormal(z, mean, logvar) #[P,B]
    # Apply the flows, accumulating log-determinants of the Jacobians
    # so the density can be corrected via change of variables.
    logdetsum = 0.
    for i in range(self.n_flows):
        z, logdet = self.norm_flow(self.params[i],z)
        logdetsum += logdet
    return z, logqz-logdetsum
开发者ID:chriscremer,项目名称:Other_Code,代码行数:31,代码来源:approx_posteriors_v6.py
示例4: forward_single_image_tensor
def forward_single_image_tensor(self, img_tensor):
    """
    Simple forward pass on the network.

    Normalize the image if we are in TEST mode.
    If we are in TRAIN mode then assume the dataset object has already
    normalized the image.

    :param img_tensor: torch.FloatTensor with shape [3,H,W]
    :type img_tensor:
    :return: torch.FloatTensor with shape [H, W, D]
    :rtype:
    """
    assert len(img_tensor.shape) == 3
    # Add a batch dimension: [3,H,W] -> [1,3,H,W].
    batched = img_tensor.unsqueeze(0)
    # The fcn throws an error if we don't use a Variable here . . .
    # Maybe it's because it is in train mode?
    batched = Variable(batched.cuda(), requires_grad=False)
    descriptor = self.forward(batched)   # [1,D,H,W]
    # Drop the batch dim, then move channels last: [1,D,H,W] -> [H,W,D].
    descriptor = descriptor.squeeze(0)
    descriptor = descriptor.permute(1, 2, 0)
    return descriptor
开发者ID:shooter2062424,项目名称:pytorch-dense-correspondence,代码行数:34,代码来源:dense_correspondence_network.py
示例5: generate
def generate(model, start_words, ix2word, word2ix, prefix_words=None):
    """
    Given a few starting words, generate a complete poem continuing them.

    start_words: e.g. u'春江潮水连海平'
    prefix_words, if given, are fed through the model only to seed the
    hidden state (they influence style but do not appear in the output).
    """
    results = list(start_words)
    start_word_len = len(start_words)
    # Manually set the first input token to <START>.
    input = Variable(t.Tensor([word2ix['<START>']]).view(1, 1).long())
    if opt.use_gpu: input = input.cuda()
    hidden = None
    # Warm up the hidden state on the prefix words without recording output.
    if prefix_words:
        for word in prefix_words:
            output, hidden = model(input, hidden)
            input = Variable(input.data.new([word2ix[word]])).view(1, 1)
    for i in range(opt.max_gen_len):
        output, hidden = model(input, hidden)
        if i < start_word_len:
            # Still consuming the caller-supplied start words: feed them
            # in (teacher forcing) rather than sampling from the model.
            w = results[i]
            input = Variable(input.data.new([word2ix[w]])).view(1, 1)
        else:
            # Greedy decoding: pick the highest-scoring next token.
            top_index = output.data[0].topk(1)[1][0]
            w = ix2word[top_index]
            results.append(w)
            input = Variable(input.data.new([top_index])).view(1, 1)
        if w == '<EOP>':
            # End-of-poem marker: drop it from the output and stop.
            del results[-1]
            break
    return results
开发者ID:Zhuysheng,项目名称:pytorch-book,代码行数:34,代码来源:main.py
示例6: _pad_packed_sequence
def _pad_packed_sequence(sequence, batch_first=False, padding_value=0):
    """Inverse of pack_padded_sequence: unpack (data, batch_sizes) into a
    padded [T, B, ...] tensor plus the per-sequence lengths.

    Assumes the packed sequence was built from sequences sorted by
    decreasing length (so batch_sizes is non-increasing).
    """
    var_data, batch_sizes = sequence
    max_batch_size = int(batch_sizes[0])
    output = var_data.data.new(len(batch_sizes), max_batch_size, *var_data.size()[1:]).fill_(padding_value)
    output = Variable(output)
    lengths = []
    data_offset = 0
    prev_batch_size = int(batch_sizes[0])
    prev_i = 0
    # Walk the timesteps; the appended sentinel 0 flushes the final run.
    for i, batch_size in enumerate(batch_sizes.tolist() + [0]):
        if batch_size != prev_batch_size:
            # Copy the run of steps [prev_i, i) that all share prev_batch_size.
            l = prev_batch_size * (i - prev_i)
            tmp = var_data[data_offset:data_offset + l]
            output[prev_i:i, :prev_batch_size] = tmp.view(i - prev_i, prev_batch_size, *tmp.size()[1:])
            data_offset += l
            prev_i = i
        # Each drop in batch size means `dec` sequences ended at step i.
        dec = prev_batch_size - batch_size
        if dec > 0:
            lengths.extend((i,) * dec)
        prev_batch_size = batch_size
    # Lengths were collected shortest-first; return them longest-first to
    # match the original sort order of the packed input.
    lengths.reverse()
    if batch_first:
        output = output.transpose(0, 1)
    # This Variable doesn't actually have any history (well,
    # technically it does; it's just untracked), it is purely here to
    # make ONNX export easier. That is to say, from an autodiff
    # standpoint this doesn't make any sense.
    return output, Variable(torch.LongTensor(lengths))
开发者ID:Jsmilemsj,项目名称:pytorch,代码行数:31,代码来源:rnn.py
示例7: probs
def probs(self, generator, outputs, vocab_pointer_switches, context_question_switches,
          context_attention, question_attention,
          context_indices, question_indices,
          oov_to_limited_idx):
    """Pointer-generator output distribution: mix generator-vocab
    probabilities with copy probabilities pointed at context and
    question tokens, over a vocab extended by the current OOV words.
    """
    size = list(outputs.size())
    size[-1] = self.generative_vocab_size
    scores = generator(outputs.view(-1, outputs.size(-1))).view(size)
    p_vocab = F.softmax(scores, dim=scores.dim()-1)
    # Weight the vocab distribution by the generate-vs-copy gate.
    scaled_p_vocab = vocab_pointer_switches.expand_as(p_vocab) * p_vocab
    effective_vocab_size = self.generative_vocab_size + len(oov_to_limited_idx)
    if self.generative_vocab_size < effective_vocab_size:
        # Pad the vocab distribution with EPSILON mass for OOV slots so
        # copied OOV tokens have somewhere to accumulate probability.
        size[-1] = effective_vocab_size - self.generative_vocab_size
        buff = Variable(scaled_p_vocab.data.new(*size).fill_(EPSILON))
        scaled_p_vocab = torch.cat([scaled_p_vocab, buff], dim=buff.dim()-1)
    # Scatter-add attention weights onto the token ids they point at,
    # giving a copy distribution over the extended vocab (context side).
    p_context_ptr = Variable(scaled_p_vocab.data.new(*scaled_p_vocab.size()).fill_(EPSILON))
    p_context_ptr.scatter_add_(p_context_ptr.dim()-1, context_indices.unsqueeze(1).expand_as(context_attention), context_attention)
    scaled_p_context_ptr = (context_question_switches * (1 - vocab_pointer_switches)).expand_as(p_context_ptr) * p_context_ptr
    # Same for the question side, gated by the complementary switch.
    p_question_ptr = Variable(scaled_p_vocab.data.new(*scaled_p_vocab.size()).fill_(EPSILON))
    p_question_ptr.scatter_add_(p_question_ptr.dim()-1, question_indices.unsqueeze(1).expand_as(question_attention), question_attention)
    scaled_p_question_ptr = ((1 - context_question_switches) * (1 - vocab_pointer_switches)).expand_as(p_question_ptr) * p_question_ptr
    # The three gates sum to 1, so this is a proper mixture distribution.
    probs = scaled_p_vocab + scaled_p_context_ptr + scaled_p_question_ptr
    return probs
开发者ID:AhlamMD,项目名称:decaNLP,代码行数:28,代码来源:multitask_question_answering_network.py
示例8: sample
def sample(self, mu, logvar, k):
    """Draw k reparameterized samples z ~ N(mu, exp(logvar)) and return
    (z, logpz, logqz): log-densities under the standard-normal prior and
    under q(z|x).  Mirrored CPU/GPU branches differ only in placement.

    NOTE(review): in the CUDA branch logqz detaches mu/logvar by wrapping
    their .data in fresh Variables, while the CPU branch does not — the
    two branches are not gradient-equivalent; presumably intentional,
    verify against the training loop.
    """
    # print (mu)
    # print (logvar)
    if torch.cuda.is_available():
        eps = Variable(torch.FloatTensor(k, self.B, self.z_size).normal_()).cuda() #[P,B,Z]
        # z = mu + eps * std (reparameterization trick)
        z = eps.mul(torch.exp(.5*logvar)) + mu #[P,B,Z]
        logpz = lognormal(z, Variable(torch.zeros(self.B, self.z_size).cuda()),
                          Variable(torch.zeros(self.B, self.z_size)).cuda()) #[P,B]
        # logqz = lognormal(z, mu, logvar)
        logqz = lognormal(z, Variable(mu.data), Variable(logvar.data))
    else:
        eps = Variable(torch.FloatTensor(k, self.B, self.z_size).normal_())#[P,B,Z]
        z = eps.mul(torch.exp(.5*logvar)) + mu #[P,B,Z]
        logpz = lognormal(z, Variable(torch.zeros(self.B, self.z_size)),
                          Variable(torch.zeros(self.B, self.z_size))) #[P,B]
        logqz = lognormal(z, mu, logvar)
    return z, logpz, logqz
开发者ID:chriscremer,项目名称:Other_Code,代码行数:32,代码来源:vae_with_policy.py
示例9: __val
def __val(self):
    """
    Validation function during the train phase: run the segmentation net
    over the validation loader, accumulate pixel loss, checkpoint the net,
    then log and reset the running meters.
    """
    self.seg_net.eval()
    start_time = time.time()
    for j, data_tuple in enumerate(self.val_loader):
        # Change the data type.
        # FIX: `async` became a reserved keyword in Python 3.7, making
        # `.cuda(async=True)` a SyntaxError; PyTorch renamed the argument
        # to `non_blocking` in 0.4.
        inputs = Variable(data_tuple[0].cuda(non_blocking=True), volatile=True)
        targets = Variable(data_tuple[1].cuda(non_blocking=True), volatile=True)
        # Forward pass.
        outputs = self.seg_net(inputs)
        # Compute the loss of the val batch.
        loss_pixel = self.pixel_loss(outputs, targets)
        loss = loss_pixel
        self.val_losses.update(loss.data[0], inputs.size(0))
        # Update the vars of the val phase.
        self.batch_time.update(time.time() - start_time)
        start_time = time.time()
    self.module_utilizer.save_net(self.seg_net, self.iters)
    # Print the log info & reset the states.
    Log.info(
        'Test Time {batch_time.sum:.3f}s, ({batch_time.avg:.3f})\t'
        'Loss {loss.avg:.8f}\n'.format(
            batch_time=self.batch_time, loss=self.val_losses))
    self.batch_time.reset()
    self.val_losses.reset()
    # Return the net to training mode for the surrounding train loop.
    self.seg_net.train()
开发者ID:shubhampachori12110095,项目名称:pytorch-cv,代码行数:32,代码来源:fcn_segmentor.py
示例10: random_batch
def random_batch(batch_size=3):
    """Sample `batch_size` random sentence pairs, pad them, and return
    (input_var, input_lengths, target_var, target_lengths) with tensors
    shaped (seq_len, batch)."""
    source_seqs = []
    reply_seqs = []
    # Choose random pairs and index-encode both sides.
    for _ in range(batch_size):
        src, tgt = random.choice(pairs)
        source_seqs.append(indexes_from_sentence(input_lang, src))
        reply_seqs.append(indexes_from_sentence(output_lang, tgt))
    # Sort jointly by source length, longest first (required for packing).
    ordered = sorted(zip(source_seqs, reply_seqs), key=lambda p: len(p[0]), reverse=True)
    source_seqs, reply_seqs = zip(*ordered)
    # Record true lengths, then pad every sequence to the batch maximum.
    input_lengths = [len(s) for s in source_seqs]
    target_lengths = [len(s) for s in reply_seqs]
    input_padded = [pad_seq(s, max(input_lengths)) for s in source_seqs]
    target_padded = [pad_seq(s, max(target_lengths)) for s in reply_seqs]
    # (batch, seq) tensors transposed to the (seq, batch) layout RNNs expect.
    input_var = Variable(torch.LongTensor(input_padded)).transpose(0, 1)
    target_var = Variable(torch.LongTensor(target_padded)).transpose(0, 1)
    if USE_CUDA:
        input_var, target_var = input_var.cuda(), target_var.cuda()
    return input_var, input_lengths, target_var, target_lengths
开发者ID:niluanwudidadi,项目名称:practical-pytorch,代码行数:29,代码来源:seq2seq-translation-batched.py
示例11: update
def update(self):
    """One A2C update: bootstrap returns from the last state, compute
    value/policy/entropy losses from the rollout buffers, and take a
    clipped gradient step."""
    # Bootstrap value of the final state (no grad needed: volatile=True).
    next_value = self.actor_critic(Variable(self.rollouts.states[-1], volatile=True))[0].data
    self.rollouts.compute_returns(next_value, self.use_gae, self.gamma, self.tau)
    # values, action_log_probs, dist_entropy = self.actor_critic.evaluate_actions(
    #     Variable(self.rollouts.states[:-1].view(-1, *self.obs_shape)),
    #     Variable(self.rollouts.actions.view(-1, self.action_shape)))
    # Stack the per-step buffers into [num_steps, num_processes, 1] tensors.
    values = torch.cat(self.rollouts.value_preds, 0).view(self.num_steps, self.num_processes, 1)
    action_log_probs = torch.cat(self.rollouts.action_log_probs).view(self.num_steps, self.num_processes, 1)
    dist_entropy = torch.cat(self.rollouts.dist_entropy).view(self.num_steps, self.num_processes, 1)
    # Clear the buffers for the next rollout.
    self.rollouts.value_preds = []
    self.rollouts.action_log_probs = []
    self.rollouts.dist_entropy = []
    advantages = Variable(self.rollouts.returns[:-1]) - values
    value_loss = advantages.pow(2).mean()
    # advantages.data detaches the advantage so the policy-gradient term
    # does not backprop through the value estimate.
    action_loss = -(Variable(advantages.data) * action_log_probs).mean()
    self.optimizer.zero_grad()
    # Entropy bonus encourages exploration (subtracted from the cost).
    cost = action_loss + value_loss*self.value_loss_coef - dist_entropy.mean()*self.entropy_coef
    cost.backward()
    nn.utils.clip_grad_norm(self.actor_critic.parameters(), self.grad_clip)
    self.optimizer.step()
开发者ID:chriscremer,项目名称:Other_Code,代码行数:33,代码来源:a2c_agents.py
示例12: F_affine2d
def F_affine2d(x, matrix, center=True):
    """
    2D Affine image transform on torch.autograd.Variable

    x: image tensor indexed as [C?,H,W] via x.size(1), x.size(2)
       (assumes batch dim at 0 — TODO confirm against callers)
    matrix: affine matrix, [2,3] or [N,2,3]; the left 2x2 is the linear
       part A, the last column is the translation b.
    center: if True, apply the transform about the image center rather
       than the top-left corner.
    """
    if matrix.dim() == 2:
        matrix = matrix.view(-1,2,3)
    A_batch = matrix[:,:,:2]
    if A_batch.size(0) != x.size(0):
        # Broadcast a single matrix across the whole batch.
        A_batch = A_batch.repeat(x.size(0),1,1)
    b_batch = matrix[:,:,2].unsqueeze(1)
    # make a meshgrid of normal coordinates
    _coords = th_iterproduct(x.size(1),x.size(2))
    coords = Variable(_coords.unsqueeze(0).repeat(x.size(0),1,1).float(),
                      requires_grad=False)
    if center:
        # shift the coordinates so center is the origin
        coords[:,:,0] = coords[:,:,0] - (x.size(1) / 2. + 0.5)
        coords[:,:,1] = coords[:,:,1] - (x.size(2) / 2. + 0.5)
    # apply the coordinate transformation: new = coords @ A^T + b
    new_coords = coords.bmm(A_batch.transpose(1,2)) + b_batch.expand_as(coords)
    if center:
        # shift the coordinates back so origin is origin
        new_coords[:,:,0] = new_coords[:,:,0] + (x.size(1) / 2. + 0.5)
        new_coords[:,:,1] = new_coords[:,:,1] + (x.size(2) / 2. + 0.5)
    # map new coordinates using bilinear interpolation
    x_transformed = F_bilinear_interp2d(x, new_coords)
    return x_transformed
开发者ID:BrianDo2005,项目名称:torchsample,代码行数:33,代码来源:affine.py
示例13: forward
def forward(self, inputs): # inputs (bs,words/sentence) 10,7
    """Multi-width CNN text classifier: two embedding channels, parallel
    conv3/conv5/conv7 feature maps, max-pool, dropout, linear head."""
    bsz = inputs.size(0) # batch size might change
    if inputs.size(1) < 3: # pad very short sentences so conv3 has enough width
        pads = Variable(torch.zeros(bsz,3-inputs.size(1))).type(torch.LongTensor)
        inputs = torch.cat([inputs,pads.cuda()],dim=1)
    # Two separate embedding tables form two input channels, each
    # reshaped to (batch, 1, seq_len, emb_dim)-style conv input.
    embeds = self.embeddings(inputs) # 10,h,300
    embeds = embeds.unsqueeze(3)
    embeds = embeds.permute(0,2,1,3)
    s_embeds = self.s_embeddings(inputs)
    s_embeds = s_embeds.unsqueeze(3)
    s_embeds = s_embeds.permute(0,2,1,3)
    out = torch.cat([embeds,s_embeds],dim=3)
    # Parallel convolutions with different receptive widths.
    fw3 = self.conv3(out) # 10,100,h,1
    fw5 = self.conv5(out) # 10,100,h,1
    fw7 = self.conv7(out) # 10,100,h,1
    out = torch.cat([fw3,fw5,fw7],dim=1)
    out = F.relu(out) # 10,300,h/3,1
    # Max over time, flatten, regularize, classify.
    out = self.maxpool(out) # 10,300,1,1
    out = out.view(bsz,-1) # 10,600
    out = self.dropout(out)
    out = self.linear(out) # 10,2
    return out
开发者ID:anihamde,项目名称:cs287-s18,代码行数:28,代码来源:cnn_eval.py
示例14: update_parameters
def update_parameters(self, batch):
    """One DDPG update from a replay batch: fit the critic to the
    bootstrapped Bellman target, ascend the critic w.r.t. the actor's
    actions, then soft-update both target networks.

    Returns (value_loss, policy_loss) as Python floats.
    """
    state_batch = Variable(torch.cat(batch.state))
    action_batch = Variable(torch.cat(batch.action))
    reward_batch = Variable(torch.cat(batch.reward))
    mask_batch = Variable(torch.cat(batch.mask))
    next_state_batch = Variable(torch.cat(batch.next_state))
    # Bellman target from the *target* networks for stability.
    next_action_batch = self.actor_target(next_state_batch)
    next_state_action_values = self.critic_target(next_state_batch, next_action_batch)
    reward_batch = reward_batch.unsqueeze(1)
    mask_batch = mask_batch.unsqueeze(1)
    # mask zeroes the bootstrap term for terminal transitions.
    expected_state_action_batch = reward_batch + (self.gamma * mask_batch * next_state_action_values)
    # Critic step: MSE to the Bellman target.
    self.critic_optim.zero_grad()
    state_action_batch = self.critic((state_batch), (action_batch))
    value_loss = F.mse_loss(state_action_batch, expected_state_action_batch)
    value_loss.backward()
    self.critic_optim.step()
    # Actor step: maximize critic value of the actor's actions.
    self.actor_optim.zero_grad()
    policy_loss = -self.critic((state_batch),self.actor((state_batch)))
    policy_loss = policy_loss.mean()
    policy_loss.backward()
    self.actor_optim.step()
    # Polyak-average the online weights into the target networks.
    soft_update(self.actor_target, self.actor, self.tau)
    soft_update(self.critic_target, self.critic, self.tau)
    return value_loss.item(), policy_loss.item()
开发者ID:lenvdv,项目名称:pytorch-ddpg-naf,代码行数:34,代码来源:ddpg.py
示例15: show_result
def show_result(num_epoch, show = False, save = False, path = 'result.png', isFix=False):
    """Render a 5x5 grid of generator samples for the given epoch and save
    it to `path`; optionally display it.  Uses module-level generator G
    (and fixed_z_ when isFix is True)."""
    # Fresh latent batch of 25 vectors shaped (25, 100, 1, 1).
    z_ = torch.randn((5*5, 100)).view(-1, 100, 1, 1)
    z_ = Variable(z_.cuda(), volatile=True)
    # Switch to eval for sampling, then restore train mode.
    G.eval()
    if isFix:
        test_images = G(fixed_z_)
    else:
        test_images = G(z_)
    G.train()
    size_figure_grid = 5
    fig, ax = plt.subplots(size_figure_grid, size_figure_grid, figsize=(5, 5))
    for i, j in itertools.product(range(size_figure_grid), range(size_figure_grid)):
        ax[i, j].get_xaxis().set_visible(False)
        ax[i, j].get_yaxis().set_visible(False)
    for k in range(5*5):
        i = k // 5
        j = k % 5
        ax[i, j].cla()
        # Map tanh output range [-1, 1] back to [0, 1] for display.
        ax[i, j].imshow((test_images[k].cpu().data.numpy().transpose(1, 2, 0) + 1) / 2)
    label = 'Epoch {0}'.format(num_epoch)
    fig.text(0.5, 0.04, label, ha='center')
    plt.savefig(path)
    if show:
        plt.show()
    else:
        plt.close()
开发者ID:KudoLayton,项目名称:pytorch-MNIST-CelebA-GAN-DCGAN,代码行数:31,代码来源:pytorch_CelebA_DCGAN.py
示例16: train
def train(dataloader):
    """Train `uf` for one epoch over `dataloader`, logging loss/perplexity
    every `log_interval` batches.  Loss is averaged over non-ignored
    target items.  Relies on module-level uf, criteria, optimizer,
    DEVICE_NO, epoch, log_interval."""
    uf.train()
    total_loss = 0
    total_items = 0
    start_time = time.time()
    for i_batch, batch in enumerate(dataloader):
        # Separate the target from the remaining (input) fields.
        output_seq = Variable(batch['output_seq'])
        del (batch['output_seq'])
        for k in batch:
            batch[k] = Variable(batch[k])
        if DEVICE_NO != -1:
            # DEVICE_NO == -1 means CPU; otherwise move everything to GPU.
            output_seq = output_seq.cuda(DEVICE_NO)
            for k in batch:
                batch[k] = batch[k].cuda(DEVICE_NO)
        uf.zero_grad()
        pred = uf.forward(**batch)
        # Flatten to (N, vocab) vs (N,) for the criterion.
        pred = pred.view(-1, pred.size(-1))
        output_seq = output_seq.view(-1)
        loss = criteria(pred, output_seq)
        loss.backward()
        # Count only targets that are not the criterion's ignore_index, so
        # the running loss is a true per-item average.
        num_items = len([x for x in output_seq if int(x) != criteria.ignore_index])
        total_loss += num_items * loss.data
        total_items += num_items
        optimizer.step()
        if i_batch % log_interval == 0 and i_batch > 0:
            # total_loss is a 1-element tensor here (loss.data accumulated),
            # hence the [0] indexing — old-style PyTorch scalar access.
            cur_loss = total_loss[0] / total_items
            elapsed = time.time() - start_time
            print('| epoch {:3d} | {:5d}/{:5d} batches | lr {:04.2f} | ms/batch {:5.2f} | '
                  'loss {:5.2f} | ppl {:8.2f}'.format(
                epoch, i_batch, len(dataloader.dataset) // dataloader.batch_size, optimizer.param_groups[0]['lr'],
                elapsed * 1000 / log_interval, cur_loss, math.exp(cur_loss)))
            total_loss = 0
            total_items = 0
            start_time = time.time()
开发者ID:ParkTong,项目名称:Unified-Architecture-for-Semantic-Role-Labeling-and-Relation-Classification,代码行数:35,代码来源:train.py
示例17: visualizeModel
def visualizeModel(model, numImages=6):
    """Plot predictions for the first `numImages` validation images,
    restoring the model's original train/eval mode before returning."""
    # Remember the mode so we can restore it on every exit path.
    wasTraining = model.training
    model.eval()
    imagesSoFar = 0
    fig = plt.figure()
    for i, (inputs, labels) in enumerate(dataloaders['val']):
        if use_gpu:
            inputs, labels = inputs.cuda(), labels.cuda()
        inputs, labels = Variable(inputs), Variable(labels)
        outputs = model(inputs)
        _, preds = torch.max(outputs.data, 1)
        for j in range(inputs.size(0)):
            imagesSoFar += 1
            nCols = 2
            ax = plt.subplot(numImages // nCols, nCols, imagesSoFar)
            ax.axis('off')
            ax.set_title('predicted: {}'.format(class_names[preds[j]]))
            imshow(inputs.cpu().data[j])
            # Early return once the grid is full.
            if imagesSoFar == numImages:
                model.train(mode=wasTraining)
                return
    model.train(mode=wasTraining)
开发者ID:Daiver,项目名称:jff,代码行数:26,代码来源:transfer_tutor1.py
示例18: vector_grad
def vector_grad():
    """Demonstrate backprop through elementwise ops on 2-vectors:
    z = x^2 + 3*y^2 with x = [3,3], y = [4,4], so the printed gradients
    are dz/dx = 2x = [6,6] and dz/dy = 6y = [24,24]."""
    a = Variable(torch.ones(2) * 3, requires_grad=True)
    b = Variable(torch.ones(2) * 4, requires_grad=True)
    result = a ** 2 + 3 * b ** 2
    # Vector-valued output: supply an all-ones "upstream" gradient.
    result.backward(torch.ones(2))
    print(a.grad)
    print(b.grad)
示例19: train
def train(epoch):
    """Train `unet` for one epoch: random-crop each batch, step the
    optimizer, log every 10 iterations, and save a sample image.

    Relies on module-level unet, optimizer, criterion, opt, cuda,
    training_data_loader, target_gap, target_size.
    """
    epoch_loss = 0
    for iteration, batch in enumerate(training_data_loader, 1):
        # Random crop offset shared by input and (gap-shifted) target.
        randH = random.randint(0, opt.remsize)
        randW = random.randint(0, opt.remsize)
        input = Variable(batch[0][:, :, randH:randH + opt.size, randW:randW + opt.size])
        target = Variable(batch[1][:, :,
                          randH + target_gap:randH + target_gap + target_size,
                          randW + target_gap:randW + target_gap + target_size])
        if cuda:
            input = input.cuda()
            target = target.cuda()
        input = unet(input)
        loss = criterion( input, target)
        epoch_loss += loss.data[0]
        # BUG FIX: without zero_grad() the gradients accumulate across
        # iterations, corrupting every step after the first.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # FIX: `is 0` compared identity, not value (works only by CPython
        # small-int caching and warns on 3.8+); use == for value equality.
        if iteration % 10 == 0:
            print("===> Epoch[{}]({}/{}): Loss: {:.4f}".format(epoch, iteration, len(training_data_loader), loss.data[0]))
            # Map network output from [-2, 0]-ish range back for saving.
            imgout = input.data/2 +1
            torchvision.utils.save_image(imgout,"/home/wcd/PytorchProject/Unet/unetdata/checkpoint/epch_"+str(epoch)+'.jpg')
    print("===> Epoch {} Complete: Avg. Loss: {:.4f}".format(epoch, epoch_loss / len(training_data_loader)))
开发者ID:ascenoputing,项目名称:SemanticSegmentation_DL,代码行数:25,代码来源:train.py
示例20: to_variable
def to_variable(numpy_data, volatile=False, is_cuda=True):
    """Wrap a numpy array in a float32 torch Variable, optionally on GPU.

    :param numpy_data: numpy array of any numeric dtype (cast to float32)
    :param volatile: legacy autograd volatile flag passed to Variable
    :param is_cuda: move the result to the default GPU when True
    :return: torch.autograd.Variable holding the converted data
    """
    tensor = torch.from_numpy(numpy_data.astype(np.float32)).float()
    var = Variable(tensor, volatile=volatile)
    return var.cuda() if is_cuda else var
开发者ID:huanglizhi,项目名称:Pytorch_Mask_RCNN,代码行数:7,代码来源:eval.py
注:本文中的torch.autograd.Variable类示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论