This article collects typical usage examples of the torch.ones function from Python's torch package. If you have been wondering what exactly torch.ones does, or how it is used in practice, the curated code samples below may help.
The following presents 20 code examples of the ones function, sorted by popularity by default.
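As a quick orientation before the examples: torch.ones creates a tensor filled with the scalar value 1, with its shape given by the positional size arguments and its dtype, device, and autograd behavior controlled by optional keyword arguments. A minimal sketch (the variable names are illustrative only, not from any of the projects below):

import torch

a = torch.ones(2, 3)                      # 2x3 float tensor of ones
b = torch.ones(4, dtype=torch.int64)     # 1-D int64 tensor of ones
c = torch.ones(2, 2, requires_grad=True) # ones tracked by autograd
print(a.shape)  # torch.Size([2, 3])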
Example 1: test2
def test2():
    x = torch.ones(1, 2)  # 1x2 tensor of ones
    x = Variable(x)       # legacy autograd wrapper (pre-0.4 PyTorch)
    y = torch.ones(1, 2)  # unused in this demo
    z = x + 0.5           # elementwise add; z is also unused
    print(x.data)
Author: gonglixue, Project: PRML_Python, Lines: 7, Source: test.py
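Note that this example uses the old Variable wrapper from pre-0.4 PyTorch; since version 0.4, Variable has been merged into Tensor. A roughly equivalent modern sketch (not part of the original project) would be:

import torch

def test2_modern():
    x = torch.ones(1, 2)  # plain tensors support autograd directly now
    z = x + 0.5           # elementwise add, as in the original
    print(x)              # no .data unwrapping needed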
Example 2: test_hmc_conjugate_gaussian
def test_hmc_conjugate_gaussian(fixture,
                                num_samples,
                                warmup_steps,
                                hmc_params,
                                expected_means,
                                expected_precs,
                                mean_tol,
                                std_tol):
    pyro.get_param_store().clear()
    hmc_kernel = HMC(fixture.model, **hmc_params)
    mcmc_run = MCMC(hmc_kernel, num_samples, warmup_steps).run(fixture.data)
    for i in range(1, fixture.chain_len + 1):
        param_name = 'loc_' + str(i)
        marginal = EmpiricalMarginal(mcmc_run, sites=param_name)
        latent_loc = marginal.mean
        latent_std = marginal.variance.sqrt()
        expected_mean = torch.ones(fixture.dim) * expected_means[i - 1]
        expected_std = 1 / torch.sqrt(torch.ones(fixture.dim) * expected_precs[i - 1])

        # Actual vs expected posterior means for the latents
        logger.info('Posterior mean (actual) - {}'.format(param_name))
        logger.info(latent_loc)
        logger.info('Posterior mean (expected) - {}'.format(param_name))
        logger.info(expected_mean)
        assert_equal(rmse(latent_loc, expected_mean).item(), 0.0, prec=mean_tol)

        # Actual vs expected posterior standard deviations for the latents
        logger.info('Posterior std (actual) - {}'.format(param_name))
        logger.info(latent_std)
        logger.info('Posterior std (expected) - {}'.format(param_name))
        logger.info(expected_std)
        assert_equal(rmse(latent_std, expected_std).item(), 0.0, prec=std_tol)
Author: lewisKit, Project: pyro, Lines: 32, Source: test_hmc.py
Example 3: __init__
def __init__(self, hidden_size, num_inputs, action_space):
    super(Policy, self).__init__()
    self.action_space = action_space
    num_outputs = action_space.shape[0]

    self.bn0 = nn.BatchNorm1d(num_inputs)
    self.bn0.weight.data.fill_(1)
    self.bn0.bias.data.fill_(0)

    self.linear1 = nn.Linear(num_inputs, hidden_size)
    self.bn1 = nn.BatchNorm1d(hidden_size)
    self.bn1.weight.data.fill_(1)
    self.bn1.bias.data.fill_(0)

    self.linear2 = nn.Linear(hidden_size, hidden_size)
    self.bn2 = nn.BatchNorm1d(hidden_size)
    self.bn2.weight.data.fill_(1)
    self.bn2.bias.data.fill_(0)

    self.V = nn.Linear(hidden_size, 1)
    self.V.weight.data.mul_(0.1)
    self.V.bias.data.mul_(0.1)

    self.mu = nn.Linear(hidden_size, num_outputs)
    self.mu.weight.data.mul_(0.1)
    self.mu.bias.data.mul_(0.1)

    self.L = nn.Linear(hidden_size, num_outputs ** 2)
    self.L.weight.data.mul_(0.1)
    self.L.bias.data.mul_(0.1)

    # Fixed masks selecting the strictly-lower-triangular and diagonal parts
    # of the L matrix used in the NAF advantage term
    self.tril_mask = Variable(torch.tril(torch.ones(
        num_outputs, num_outputs), diagonal=-1).unsqueeze(0))
    self.diag_mask = Variable(torch.diag(torch.diag(
        torch.ones(num_outputs, num_outputs))).unsqueeze(0))
Author: lenvdv, Project: pytorch-ddpg-naf, Lines: 35, Source: naf.py
Example 4: guide
def guide():
    mu_q = pyro.param("mu_q", Variable(self.analytic_mu_n.data + 0.094 * torch.ones(2),
                                       requires_grad=True))
    log_sig_q = pyro.param("log_sig_q", Variable(
        self.analytic_log_sig_n.data - 0.11 * torch.ones(2), requires_grad=True))
    sig_q = torch.exp(log_sig_q)
    trivial_baseline = pyro.module("mu_baseline", pt_mu_baseline, tags="baseline")
    baseline_value = trivial_baseline(ng_ones(1))
    mu_latent = pyro.sample("mu_latent",
                            dist.Normal(mu_q, sig_q, reparameterized=False),
                            baseline=dict(baseline_value=baseline_value))

    def obs_inner(i, _i, _x):
        for k in range(n_superfluous_top + n_superfluous_bottom):
            z_baseline = pyro.module("z_baseline_%d_%d" % (i, k),
                                     pt_superfluous_baselines[3 * k + i], tags="baseline")
            baseline_value = z_baseline(mu_latent.detach()).unsqueeze(-1)
            mean_i = pyro.param("mean_%d_%d" % (i, k),
                                Variable(0.5 * torch.ones(4 - i, 1), requires_grad=True))
            pyro.sample("z_%d_%d" % (i, k),
                        dist.Normal(mean_i, ng_ones(4 - i, 1), reparameterized=False),
                        baseline=dict(baseline_value=baseline_value))

    def obs_outer(i, x):
        pyro.map_data("map_obs_inner_%d" % i, x, lambda _i, _x:
                      obs_inner(i, _i, _x), batch_size=4 - i)

    pyro.map_data("map_obs_outer", [self.data_tensor[0:4, :], self.data_tensor[4:7, :],
                                    self.data_tensor[7:9, :]],
                  lambda i, x: obs_outer(i, x), batch_size=3)

    return mu_latent
Author: Magica-Chen, Project: pyro, Lines: 32, Source: test_tracegraph_elbo.py
Example 5: forward
def forward(self, input_features, adj):
    #x = self.conv1(input_features, adj)
    #x = self.bn1(x)
    #x = self.act(x)
    #x = self.conv2(x, adj)
    #x = self.bn2(x)
    # pool over all nodes
    #graph_h = self.pool_graph(x)
    graph_h = input_features.view(-1, self.max_num_nodes * self.max_num_nodes)
    # vae
    h_decode, z_mu, z_lsgms = self.vae(graph_h)
    out = F.sigmoid(h_decode)
    out_tensor = out.cpu().data
    recon_adj_lower = self.recover_adj_lower(out_tensor)
    recon_adj_tensor = self.recover_full_adj_from_lower(recon_adj_lower)
    # set the matching features to be node degrees
    out_features = torch.sum(recon_adj_tensor, 1)
    adj_data = adj.cpu().data[0]
    adj_features = torch.sum(adj_data, 1)
    S = self.edge_similarity_matrix(adj_data, recon_adj_tensor, adj_features, out_features,
                                    self.deg_feature_similarity)
    # initialization strategies
    init_corr = 1 / self.max_num_nodes
    init_assignment = torch.ones(self.max_num_nodes, self.max_num_nodes) * init_corr
    #init_assignment = torch.FloatTensor(4, 4)
    #init.uniform(init_assignment)
    assignment = self.mpm(init_assignment, S)
    #print('Assignment: ', assignment)
    # matching
    # use negative of the assignment score since the alg finds min cost flow
    row_ind, col_ind = scipy.optimize.linear_sum_assignment(-assignment.numpy())
    print('row: ', row_ind)
    print('col: ', col_ind)
    # order row index according to col index
    #adj_permuted = self.permute_adj(adj_data, row_ind, col_ind)
    adj_permuted = adj_data
    adj_vectorized = adj_permuted[torch.triu(torch.ones(self.max_num_nodes, self.max_num_nodes)) == 1].squeeze_()
    adj_vectorized_var = Variable(adj_vectorized).cuda()
    #print(adj)
    #print('permuted: ', adj_permuted)
    #print('recon: ', recon_adj_tensor)
    adj_recon_loss = self.adj_recon_loss(adj_vectorized_var, out[0])
    print('recon: ', adj_recon_loss)
    print(adj_vectorized_var)
    print(out[0])
    loss_kl = -0.5 * torch.sum(1 + z_lsgms - z_mu.pow(2) - z_lsgms.exp())
    loss_kl /= self.max_num_nodes * self.max_num_nodes  # normalize
    print('kl: ', loss_kl)
    loss = adj_recon_loss + loss_kl
    return loss
Author: taeyen, Project: graph-generation, Lines: 60, Source: model.py
Example 6: test_elmo_lstm_cell_completes_forward_pass
def test_elmo_lstm_cell_completes_forward_pass(self):
    input_tensor = torch.autograd.Variable(torch.rand(4, 5, 3))
    input_tensor[1, 4:, :] = 0.
    input_tensor[2, 2:, :] = 0.
    input_tensor[3, 1:, :] = 0.
    initial_hidden_state = Variable(torch.ones([1, 4, 5]))
    initial_memory_state = Variable(torch.ones([1, 4, 7]))
    lstm = LstmCellWithProjection(input_size=3,
                                  hidden_size=5,
                                  cell_size=7,
                                  memory_cell_clip_value=2,
                                  state_projection_clip_value=1)
    output_sequence, lstm_state = lstm(input_tensor, [5, 4, 2, 1],
                                       (initial_hidden_state, initial_memory_state))
    numpy.testing.assert_array_equal(output_sequence.data[1, 4:, :].numpy(), 0.0)
    numpy.testing.assert_array_equal(output_sequence.data[2, 2:, :].numpy(), 0.0)
    numpy.testing.assert_array_equal(output_sequence.data[3, 1:, :].numpy(), 0.0)
    # Test the state clipping.
    numpy.testing.assert_array_less(output_sequence.data.numpy(), 1.0)
    numpy.testing.assert_array_less(-output_sequence.data.numpy(), 1.0)
    # LSTM state should be (num_layers, batch_size, hidden_size)
    assert list(lstm_state[0].size()) == [1, 4, 5]
    # LSTM memory cell should be (num_layers, batch_size, cell_size)
    assert list(lstm_state[1].size()) == [1, 4, 7]
    # Test the cell clipping.
    numpy.testing.assert_array_less(lstm_state[0].data.numpy(), 2.0)
    numpy.testing.assert_array_less(-lstm_state[0].data.numpy(), 2.0)
Author: Jordan-Sauchuk, Project: allennlp, Lines: 32, Source: lstm_cell_with_projection_test.py
Example 7: model
def model():
    latent = named.Object("latent")
    latent.list = named.List()
    loc = latent.list.add().loc.param_(torch.zeros(1))
    latent.dict = named.Dict()
    foo = latent.dict["foo"].foo.sample_(dist.Normal(loc, torch.ones(1)))
    latent.object.bar.sample_(dist.Normal(loc, torch.ones(1)), obs=foo)
Author: lewisKit, Project: pyro, Lines: 7, Source: test_named.py
Example 8: vector_grad
def vector_grad():
    x = Variable(torch.ones(2) * 3, requires_grad=True)
    y = Variable(torch.ones(2) * 4, requires_grad=True)
    z = x.pow(2) + 3 * y.pow(2)
    z.backward(torch.ones(2))  # vector-valued z needs an explicit gradient argument
    print(x.grad)  # dz/dx = 2x = [6, 6]
    print(y.grad)  # dz/dy = 6y = [24, 24]
Author: gonglixue, Project: PRML_Python, Lines: 7, Source: gradient.py
Example 9: bernoulli_normal_model
def bernoulli_normal_model():
    bern_0 = pyro.sample('bern_0', dist.Bernoulli(torch.zeros(1) * 1e-2))
    loc = torch.ones(1) if bern_0.item() else -torch.ones(1)
    normal_0 = torch.ones(1)
    pyro.sample('normal_0', dist.Normal(loc, torch.ones(1) * 1e-2),
                obs=normal_0)
    return [bern_0, normal_0]
Author: lewisKit, Project: pyro, Lines: 7, Source: test_properties.py
Example 10: test_growing_dataset
def test_growing_dataset(self):
    dataset = [torch.ones(4) for _ in range(4)]
    dataloader_seq = DataLoader(dataset, shuffle=False)
    dataloader_shuffle = DataLoader(dataset, shuffle=True)
    dataset.append(torch.ones(4))
    self.assertEqual(len(dataloader_seq), 5)
    self.assertEqual(len(dataloader_shuffle), 5)
Author: RichieMay, Project: pytorch, Lines: 7, Source: test_dataloader.py
Example 11: heads_tails
def heads_tails(n_ent, train_data, valid_data=None, test_data=None):
    train_src, train_rel, train_dst = train_data
    if valid_data:
        valid_src, valid_rel, valid_dst = valid_data
    else:
        valid_src = valid_rel = valid_dst = []
    if test_data:
        test_src, test_rel, test_dst = test_data
    else:
        test_src = test_rel = test_dst = []
    all_src = train_src + valid_src + test_src
    all_rel = train_rel + valid_rel + test_rel
    all_dst = train_dst + valid_dst + test_dst
    heads = defaultdict(lambda: set())
    tails = defaultdict(lambda: set())
    for s, r, t in zip(all_src, all_rel, all_dst):
        tails[(s, r)].add(t)
        heads[(t, r)].add(s)
    heads_sp = {}
    tails_sp = {}
    for k in tails.keys():
        tails_sp[k] = torch.sparse.FloatTensor(torch.LongTensor([list(tails[k])]),
                                               torch.ones(len(tails[k])), torch.Size([n_ent]))
    for k in heads.keys():
        heads_sp[k] = torch.sparse.FloatTensor(torch.LongTensor([list(heads[k])]),
                                               torch.ones(len(heads[k])), torch.Size([n_ent]))
    return heads_sp, tails_sp
Author: cai-lw, Project: KBGAN, Lines: 27, Source: data_utils.py
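As a side note, torch.sparse.FloatTensor as used above is a legacy constructor; current PyTorch exposes the same construction through torch.sparse_coo_tensor. A small sketch of how one of the per-key sparse vectors above could be built that way (the entity ids here are made up for illustration):

import torch

n_ent = 10
tail_ids = [2, 5, 7]  # hypothetical tail entities for one (source, relation) key
sparse_vec = torch.sparse_coo_tensor(torch.tensor([tail_ids]),
                                     torch.ones(len(tail_ids)),
                                     size=(n_ent,))
print(sparse_vec.to_dense())  # dense 10-vector with ones at positions 2, 5, 7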
Example 12: test_cpu
def test_cpu(self):
    create_extension(
        name='test_extensions.cpulib',
        headers=[test_dir + '/ffi/src/cpu/lib.h'],
        sources=[
            test_dir + '/ffi/src/cpu/lib1.c',
            test_dir + '/ffi/src/cpu/lib2.c',
        ],
        verbose=False,
    ).build()
    from test_extensions import cpulib
    tensor = torch.ones(2, 2).float()

    cpulib.good_func(tensor, 2, 1.5)
    self.assertEqual(tensor, torch.ones(2, 2) * 2 + 1.5)

    new_tensor = cpulib.new_tensor(4)
    self.assertEqual(new_tensor, torch.ones(4, 4) * 4)

    f = cpulib.int_to_float(5)
    self.assertIs(type(f), float)

    self.assertRaises(TypeError,
                      lambda: cpulib.good_func(tensor.double(), 2, 1.5))
    self.assertRaises(torch.FatalError,
                      lambda: cpulib.bad_func(tensor, 2, 1.5))
Author: xiongyw, Project: pytorch, Lines: 26, Source: test_utils.py
Example 13: test_python_ir
def test_python_ir(self):
    x = Variable(torch.Tensor([0.4]), requires_grad=True)
    y = Variable(torch.Tensor([0.7]), requires_grad=True)

    def doit(x, y):
        return torch.sigmoid(torch.tanh(x * (x + y)))

    traced, _ = torch.jit.trace(doit, (x, y))
    g = torch._C._jit_get_graph(traced)
    g2 = torch._C.Graph()
    g_to_g2 = {}
    for node in g.inputs():
        g_to_g2[node] = g2.addInput()
    for node in g.nodes():
        n_ = g2.createClone(node, lambda x: g_to_g2[x])
        g2.appendNode(n_)
        for o, no in zip(node.outputs(), n_.outputs()):
            g_to_g2[o] = no
    for node in g.outputs():
        g2.registerOutput(g_to_g2[node])
    t_node = g2.create("TensorTest").t_("a", torch.ones([2, 2]))
    assert t_node.attributeNames() == ["a"]
    g2.appendNode(t_node)
    assert torch.equal(torch.ones([2, 2]), t_node.t("a"))
    self.assertExpected(str(g2))
Author: Northrend, Project: pytorch, Lines: 27, Source: test_jit.py
Example 14: test_regex_matches_are_initialized_correctly
def test_regex_matches_are_initialized_correctly(self):
    class Net(torch.nn.Module):
        def __init__(self):
            super(Net, self).__init__()
            self.linear_1_with_funky_name = torch.nn.Linear(5, 10)
            self.linear_2 = torch.nn.Linear(10, 5)
            self.conv = torch.nn.Conv1d(5, 5, 5)

        def forward(self, inputs):  # pylint: disable=arguments-differ
            pass

    # pyhocon does funny things if there's a . in a key. This test makes sure that we
    # handle these kinds of regexes correctly.
    json_params = """{"initializer": [
        ["conv", {"type": "constant", "val": 5}],
        ["funky_na.*bi", {"type": "constant", "val": 7}]
    ]}
    """
    params = Params(pyhocon.ConfigFactory.parse_string(json_params))
    initializers = InitializerApplicator.from_params(params['initializer'])
    model = Net()
    initializers(model)
    for parameter in model.conv.parameters():
        assert torch.equal(parameter.data, torch.ones(parameter.size()) * 5)

    parameter = model.linear_1_with_funky_name.bias
    assert torch.equal(parameter.data, torch.ones(parameter.size()) * 7)
Author: Jordan-Sauchuk, Project: allennlp, Lines: 28, Source: initializers_test.py
Example 15: test_Concat
def test_Concat(self):
    input = torch.randn(4, 2)
    num_modules = random.randint(2, 5)
    linears = [nn.Linear(2, 5) for i in range(num_modules)]

    m = nn.Concat(0)
    for l in linears:
        m.add(l)
        l.zeroGradParameters()
        l.weight.fill_(1)
        l.bias.fill_(0)

    # Check that these don't raise errors
    m.__repr__()
    str(m)

    output = m.forward(input)
    output2 = input.sum(1, True).expand(4, 5).repeat(num_modules, 1)
    self.assertEqual(output2, output)

    gradInput = m.backward(input, torch.ones(output2.size()))
    gradInput2 = torch.ones(4, 2).fill_(num_modules * 5)
    self.assertEqual(gradInput, gradInput2)

    gradWeight = input.sum(0, keepdim=True).expand(5, 2)
    for l in linears:
        self.assertEqual(gradWeight, l.gradWeight)
Author: bhuWenDongchao, Project: pytorch, Lines: 27, Source: test_legacy_nn.py
Example 16: test_joint_optimize
def test_joint_optimize(
    self,
    mock_get_best_candidates,
    mock_gen_candidates,
    mock_gen_batch_initial_conditions,
    cuda=False,
):
    q = 3
    num_restarts = 2
    raw_samples = 10
    options = {}
    mock_acq_function = MockAcquisitionFunction()
    tkwargs = {"device": torch.device("cuda") if cuda else torch.device("cpu")}
    for dtype in (torch.float, torch.double):
        tkwargs["dtype"] = dtype
        mock_gen_batch_initial_conditions.return_value = torch.zeros(
            num_restarts, q, 3, **tkwargs
        )
        mock_gen_candidates.return_value = torch.cat(
            [i * torch.ones(1, q, 3, **tkwargs) for i in range(num_restarts)], dim=0
        )
        mock_get_best_candidates.return_value = torch.ones(1, q, 3, **tkwargs)
        expected_candidates = mock_get_best_candidates.return_value
        bounds = torch.stack(
            [torch.zeros(3, **tkwargs), 4 * torch.ones(3, **tkwargs)]
        )
        candidates = joint_optimize(
            acq_function=mock_acq_function,
            bounds=bounds,
            q=q,
            num_restarts=num_restarts,
            raw_samples=raw_samples,
            options=options,
        )
        self.assertTrue(torch.equal(candidates, expected_candidates))
Author: saschwan, Project: botorch, Lines: 35, Source: test_optimize.py
Example 17: setUp
def setUp(self):
    # Tests will use 3 filters and image width, height = 2 x 2
    # Batch size 1
    x = torch.ones((1, 3, 2, 2))
    x[0, 0, 1, 0] = 1.1
    x[0, 0, 1, 1] = 1.2
    x[0, 1, 0, 1] = 1.2
    x[0, 2, 1, 0] = 1.3
    self.x = x
    self.gradient = torch.rand(x.shape)

    # Batch size 2
    x = torch.ones((2, 3, 2, 2))
    x[0, 0, 1, 0] = 1.1
    x[0, 0, 1, 1] = 1.2
    x[0, 1, 0, 1] = 1.2
    x[0, 2, 1, 0] = 1.3
    x[1, 0, 0, 0] = 1.4
    x[1, 1, 0, 0] = 1.5
    x[1, 1, 0, 1] = 1.6
    x[1, 2, 1, 1] = 1.7
    self.x2 = x
    self.gradient2 = torch.rand(x.shape)

    # All equal
    self.dutyCycle = torch.zeros((1, 3, 1, 1))
    self.dutyCycle[:] = 1.0 / 3.0
Author: rhyolight, Project: nupic.research, Lines: 29, Source: k_winners_cnn_test.py
Example 18: model
def model():
    p2 = torch.tensor(torch.ones(2) / 2)
    p3 = torch.tensor(torch.ones(3) / 3)
    x2 = pyro.sample("x2", dist.OneHotCategorical(p2))
    x3 = pyro.sample("x3", dist.OneHotCategorical(p3))
    assert x2.shape == torch.Size([2]) + iarange_shape + p2.shape
    assert x3.shape == torch.Size([3, 1]) + iarange_shape + p3.shape
Author: lewisKit, Project: pyro, Lines: 7, Source: test_valid_models.py
Example 19: test_flattened_index_select
def test_flattened_index_select(self):
    indices = numpy.array([[1, 2],
                           [3, 4]])
    targets = torch.ones([2, 6, 3]).cumsum(1) - 1
    # Double the second batch's indices so the batches are different.
    targets[1, :, :] *= 2
    indices = torch.tensor(indices, dtype=torch.long)
    selected = util.flattened_index_select(targets, indices)
    assert list(selected.size()) == [2, 2, 2, 3]

    ones = numpy.ones([3])
    numpy.testing.assert_array_equal(selected[0, 0, 0, :].data.numpy(), ones)
    numpy.testing.assert_array_equal(selected[0, 0, 1, :].data.numpy(), ones * 2)
    numpy.testing.assert_array_equal(selected[0, 1, 0, :].data.numpy(), ones * 3)
    numpy.testing.assert_array_equal(selected[0, 1, 1, :].data.numpy(), ones * 4)
    numpy.testing.assert_array_equal(selected[1, 0, 0, :].data.numpy(), ones * 2)
    numpy.testing.assert_array_equal(selected[1, 0, 1, :].data.numpy(), ones * 4)
    numpy.testing.assert_array_equal(selected[1, 1, 0, :].data.numpy(), ones * 6)
    numpy.testing.assert_array_equal(selected[1, 1, 1, :].data.numpy(), ones * 8)

    # Check that we only accept 2D indices.
    with pytest.raises(ConfigurationError):
        util.flattened_index_select(targets, torch.ones([3, 4, 5]))
Author: ziaridoy20, Project: allennlp, Lines: 26, Source: util_test.py
Example 20: knn
def knn(Mxx, Mxy, Myy, k, sqrt):
    n0 = Mxx.size(0)
    n1 = Myy.size(0)
    label = torch.cat((torch.ones(n0), torch.zeros(n1)))
    M = torch.cat((torch.cat((Mxx, Mxy), 1), torch.cat((Mxy.transpose(0, 1), Myy), 1)), 0)
    if sqrt:
        M = M.abs().sqrt()
    INFINITY = float('inf')
    # Put infinity on the diagonal so a point is never its own nearest neighbour
    val, idx = (M + torch.diag(INFINITY * torch.ones(n0 + n1))).topk(k, 0, False)
    count = torch.zeros(n0 + n1)
    for i in range(0, k):
        count = count + label.index_select(0, idx[i])
    pred = torch.ge(count, (float(k) / 2) * torch.ones(n0 + n1)).float()
    s = Score_knn()
    s.tp = (pred * label).sum()
    s.fp = (pred * (1 - label)).sum()
    s.fn = ((1 - pred) * label).sum()
    s.tn = ((1 - pred) * (1 - label)).sum()
    s.precision = s.tp / (s.tp + s.fp)
    s.recall = s.tp / (s.tp + s.fn)
    s.acc_t = s.tp / (s.tp + s.fn)
    s.acc_f = s.tn / (s.tn + s.fp)
    s.acc = torch.eq(label, pred).float().mean()
    s.k = k
    return s
Author: RobinROAR, Project: TensorflowTutorialsCode, Lines: 28, Source: metric.py
Note: The torch.ones function examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright belongs to the original authors, and distribution or use should follow the corresponding project's license. Do not reproduce without permission.