本文整理汇总了Python中torch.rand函数的典型用法代码示例。如果您正苦于以下问题:Python rand函数的具体用法?Python rand怎么用?Python rand使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了rand函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: setUp
def setUp(self, size=(2, 5), batch=3, dtype=torch.float64, device=None,
          seed=None, mu=None, cov=None, A=None, b=None):
    '''Prepare fixtures for testing the batch implementation of mean().

    Builds stacked tensors `[1 * mu, 2 * mu, ..., batch * mu]` (and the
    analogous stacks for the covariance and its diagonal) so the batch
    output can be compared against per-item results.

    Args:
        size: Tuple size of matrix A.
        batch: The batch size > 0.
        dtype: data type.
        device: In which device.
        seed: Seed for the random number generator.
        mu: To test a specific mean mu.
        cov: To test a specific covariance matrix.
        A: To test a specific A matrix.
        b: To test a specific bias b.
    '''
    if seed is not None:
        torch.manual_seed(seed)
    # Draw any inputs the caller did not supply. The draw order
    # (A, b, mu, cov) is kept fixed so seeded runs are reproducible.
    if A is None:
        A = torch.rand(size, dtype=dtype, device=device)
    if b is None:
        b = torch.rand(size[0], dtype=dtype, device=device)
    if mu is None:
        mu = torch.rand(size[1], dtype=dtype, device=device)
    if cov is None:
        cov = rand.definite(size[1], dtype=dtype, device=device,
                            positive=True, semi=False, norm=10**2)
    self.A, self.b = A, b
    var = torch.diag(cov)
    # Stack scaled copies: entry i is (i + 1) times the base tensor.
    stacks = [torch.stack([(k + 1) * base for k in range(batch)])
              for base in (mu, cov, var)]
    self.batch_mean, self.batch_cov, self.batch_var = stacks
开发者ID:ModarTensai,项目名称:network_moments,代码行数:35,代码来源:tests.py
示例2: unit_test
def unit_test(args):
    '''Smoke-test basic torch tensor operations (construction, rand,
    add, slicing), printing intermediate results; verbose output is
    gated on args.verbose.'''
    print("Torch uninitialized 5x3 matrix:")
    mat_x = torch.Tensor(5, 3)
    print(mat_x)
    print("Torch randomly initialized 5x3 matrix X:")
    mat_x = torch.rand(5, 3)
    if args.verbose:
        print(mat_x)
        print("size:", mat_x.size())
    print("Torch randomly initialized 5x3 matrix Y:")
    mat_y = torch.rand(5, 3)
    if args.verbose:
        print(mat_y)
    print("X + Y:")
    mat_sum = torch.add(mat_x, mat_y)
    print(mat_sum)
    print("slice (X + Y)[:, 1]:")
    print(mat_sum[:, 1])
    num_wrong = 0
    print("unit_test: num_tests:", 1,
          " num_wrong:", num_wrong, " -- ", "FAIL" if num_wrong else "PASS")
开发者ID:sprax,项目名称:python,代码行数:27,代码来源:ptt.py
示例3: test_FixedNoiseMultiTaskGP_single_output
def test_FixedNoiseMultiTaskGP_single_output(self, cuda=False):
    """Verify construction, fitting, and posterior evaluation of a
    single-output FixedNoiseMultiTaskGP, in both float and double precision.

    Args:
        cuda: If True, place tensors on a CUDA device instead of CPU.
    """
    for double in (False, True):
        tkwargs = {
            "device": torch.device("cuda") if cuda else torch.device("cpu"),
            "dtype": torch.double if double else torch.float,
        }
        # Helper defined elsewhere in this test module builds the model.
        model = _get_fixed_noise_model_single_output(**tkwargs)
        # Check the model's composition: likelihood, mean, and kernels.
        self.assertIsInstance(model, FixedNoiseMultiTaskGP)
        self.assertIsInstance(model.likelihood, FixedNoiseGaussianLikelihood)
        self.assertIsInstance(model.mean_module, ConstantMean)
        self.assertIsInstance(model.covar_module, ScaleKernel)
        matern_kernel = model.covar_module.base_kernel
        self.assertIsInstance(matern_kernel, MaternKernel)
        self.assertIsInstance(matern_kernel.lengthscale_prior, GammaPrior)
        self.assertIsInstance(model.task_covar_module, IndexKernel)
        self.assertEqual(model._rank, 2)
        self.assertEqual(
            model.task_covar_module.covar_factor.shape[-1], model._rank
        )
        # test model fitting (a single iteration keeps the test fast)
        mll = ExactMarginalLogLikelihood(model.likelihood, model)
        mll = fit_gpytorch_model(mll, options={"maxiter": 1})
        # test posterior
        test_x = torch.rand(2, 1, **tkwargs)
        posterior_f = model.posterior(test_x)
        self.assertIsInstance(posterior_f, GPyTorchPosterior)
        self.assertIsInstance(posterior_f.mvn, MultivariateNormal)
        # test posterior (batch eval)
        test_x = torch.rand(3, 2, 1, **tkwargs)
        posterior_f = model.posterior(test_x)
        self.assertIsInstance(posterior_f, GPyTorchPosterior)
        self.assertIsInstance(posterior_f.mvn, MultivariateNormal)
开发者ID:saschwan,项目名称:botorch,代码行数:35,代码来源:test_multitask.py
示例4: test_FixedNoiseGP
def test_FixedNoiseGP(self, cuda=False):
    """Exercise FixedNoiseGP across batch shapes, output counts, and dtypes:
    construction, fitting, forward evaluation, and posterior shapes.

    Args:
        cuda: If True, place tensors on a CUDA device instead of CPU.
    """
    for batch_shape in (torch.Size([]), torch.Size([2])):
        for num_outputs in (1, 2):
            for double in (False, True):
                tkwargs = {
                    "device": torch.device("cuda") if cuda else torch.device("cpu"),
                    "dtype": torch.double if double else torch.float,
                }
                model = self._get_model(
                    batch_shape=batch_shape,
                    num_outputs=num_outputs,
                    n=10,
                    **tkwargs
                )
                # Check the model's composition: likelihood, mean, kernels.
                self.assertIsInstance(model, FixedNoiseGP)
                self.assertIsInstance(
                    model.likelihood, FixedNoiseGaussianLikelihood
                )
                self.assertIsInstance(model.mean_module, ConstantMean)
                self.assertIsInstance(model.covar_module, ScaleKernel)
                matern_kernel = model.covar_module.base_kernel
                self.assertIsInstance(matern_kernel, MaternKernel)
                self.assertIsInstance(matern_kernel.lengthscale_prior, GammaPrior)
                # test model fitting (one iteration keeps the test fast)
                mll = ExactMarginalLogLikelihood(model.likelihood, model)
                mll = fit_gpytorch_model(mll, options={"maxiter": 1})
                # Test forward
                test_x = torch.rand(batch_shape + torch.Size([3, 1]), **tkwargs)
                posterior = model(test_x)
                self.assertIsInstance(posterior, MultivariateNormal)
                # TODO: Pass observation noise into posterior
                # posterior_obs = model.posterior(test_x, observation_noise=True)
                # self.assertTrue(
                #     torch.allclose(
                #         posterior_f.variance + 0.01,
                #         posterior_obs.variance
                #     )
                # )
                # test posterior
                # test non batch evaluation
                X = torch.rand(batch_shape + torch.Size([3, 1]), **tkwargs)
                posterior = model.posterior(X)
                self.assertIsInstance(posterior, GPyTorchPosterior)
                self.assertEqual(
                    posterior.mean.shape, batch_shape + torch.Size([3, num_outputs])
                )
                # test batch evaluation
                X = torch.rand(
                    torch.Size([2]) + batch_shape + torch.Size([3, 1]), **tkwargs
                )
                posterior = model.posterior(X)
                self.assertIsInstance(posterior, GPyTorchPosterior)
                self.assertEqual(
                    posterior.mean.shape,
                    torch.Size([2]) + batch_shape + torch.Size([3, num_outputs]),
                )
开发者ID:saschwan,项目名称:botorch,代码行数:60,代码来源:test_gp_regression.py
示例5: sample_relax
def sample_relax(logits): #, k=1):
    """REBAR/RELAX-style sampler: draws Gumbel-perturbed logits ``z``, the
    argmax category ``b``, its log-probability, and a coupled relaxation
    ``z_tilde`` conditioned on ``b``.

    NOTE(review): relies on module-level globals ``B`` (batch size) and
    ``C`` (number of categories) — confirm they match ``logits``' shape.

    Returns:
        (z, b, logprob, z_tilde)
    """
    # u = torch.rand(B,C).clamp(1e-8, 1.-1e-8) #.cuda()
    # Clamp keeps log(-log(u)) finite.
    u = torch.rand(B,C).clamp(1e-12, 1.-1e-12) #.cuda()
    gumbels = -torch.log(-torch.log(u))  # standard Gumbel(0, 1) noise
    z = logits + gumbels
    # Gumbel-max trick: argmax of perturbed logits ~ Categorical(logits).
    b = torch.argmax(z, dim=1)
    cat = Categorical(logits=logits)
    logprob = cat.log_prob(b).view(B,1)
    # Fresh uniform for the chosen category's conditional coordinate.
    v_k = torch.rand(B,1).clamp(1e-12, 1.-1e-12)
    z_tilde_b = -torch.log(-torch.log(v_k))
    #this way seems biased even tho it shoudlnt be
    # v_k = torch.gather(input=u, dim=1, index=b.view(B,1))
    # z_tilde_b = torch.gather(input=z, dim=1, index=b.view(B,1))
    v = torch.rand(B,C).clamp(1e-12, 1.-1e-12) #.cuda()
    # NOTE(review): if ``logits`` is already (B, C), softmax gives (B, C)
    # and .repeat(B, 1) would produce (B*B, C); presumably ``logits`` is
    # (1, C) here — confirm against the caller.
    probs = torch.softmax(logits,dim=1).repeat(B,1)
    # print (probs.shape, torch.log(v_k).shape, torch.log(v).shape)
    # fasdfa
    # print (v.shape)
    # print (v.shape)
    z_tilde = -torch.log((- torch.log(v) / probs) - torch.log(v_k))
    # print (z_tilde)
    # print (z_tilde_b)
    # Overwrite the chosen category's coordinate with its conditional draw.
    z_tilde.scatter_(dim=1, index=b.view(B,1), src=z_tilde_b)
    # print (z_tilde)
    # fasdfs
    return z, b, logprob, z_tilde
开发者ID:chriscremer,项目名称:Other_Code,代码行数:34,代码来源:plotting_cat_grads_dist_4.py
示例6: test_forward_works_on_higher_order_input
def test_forward_works_on_higher_order_input(self):
    """A BasicTextFieldEmbedder built from params should accept inputs with
    two extra leading (wrapping) dimensions when ``num_wrapping_dims=2`` is
    passed, producing a trailing dim equal to the sum of the two embedders'
    output dims (2 + 10 = 12).
    """
    params = Params({
        "words": {
            "type": "embedding",
            "num_embeddings": 20,
            "embedding_dim": 2,
        },
        "characters": {
            "type": "character_encoding",
            "embedding": {
                "embedding_dim": 4,
                "num_embeddings": 15,
            },
            "encoder": {
                "type": "cnn",
                "embedding_dim": 4,
                "num_filters": 10,
                "ngram_filter_sizes": [3],
            },
        }
    })
    token_embedder = BasicTextFieldEmbedder.from_params(self.vocab, params)
    # Random integer ids within each vocabulary's range; the shapes carry
    # two wrapping dims (3, 4) ahead of the usual (batch, sequence) dims.
    inputs = {
        'words': Variable(torch.rand(3, 4, 5, 6) * 20).long(),
        'characters': Variable(torch.rand(3, 4, 5, 6, 7) * 15).long(),
    }
    assert token_embedder(inputs, num_wrapping_dims=2).size() == (3, 4, 5, 6, 12)
开发者ID:Jordan-Sauchuk,项目名称:allennlp,代码行数:27,代码来源:basic_text_field_embedder_test.py
示例7: visualize_results
def visualize_results(self, epoch, fix=True):
    """Generate a grid of generator samples and save it as a PNG.

    Args:
        epoch: Epoch number, embedded in the output file name.
        fix: If True, reuse the fixed noise ``self.sample_z_`` so images
            are comparable across epochs; otherwise draw fresh noise.
    """
    self.G.eval()
    if not os.path.exists(self.result_dir + '/' + self.dataset + '/' + self.model_name):
        os.makedirs(self.result_dir + '/' + self.dataset + '/' + self.model_name)
    tot_num_samples = min(self.sample_num, self.batch_size)
    # Largest square grid that fits within the sample count.
    image_frame_dim = int(np.floor(np.sqrt(tot_num_samples)))
    if fix:
        """ fixed noise """
        samples = self.G(self.sample_z_)
    else:
        """ random noise """
        # NOTE(review): Variable(..., volatile=True) is the legacy
        # pre-0.4 way to disable autograd for inference.
        if self.gpu_mode:
            sample_z_ = Variable(torch.rand((self.batch_size, self.z_dim)).cuda(), volatile=True)
        else:
            sample_z_ = Variable(torch.rand((self.batch_size, self.z_dim)), volatile=True)
        samples = self.G(sample_z_)
    # NCHW -> NHWC for image saving.
    if self.gpu_mode:
        samples = samples.cpu().data.numpy().transpose(0, 2, 3, 1)
    else:
        samples = samples.data.numpy().transpose(0, 2, 3, 1)
    utils.save_images(samples[:image_frame_dim * image_frame_dim, :, :, :], [image_frame_dim, image_frame_dim],
                      self.result_dir + '/' + self.dataset + '/' + self.model_name + '/' + self.model_name + '_epoch%03d' % epoch + '.png')
开发者ID:zbxzc35,项目名称:pytorch-generative-model-collections,代码行数:28,代码来源:GAN.py
示例8: sample_relax_given_class
def sample_relax_given_class(logits, samp):
    """Like the sibling ``sample_relax``, but conditions on a *given*
    category ``samp`` instead of the argmax of the perturbed logits.

    NOTE(review): relies on module-level globals ``B`` and ``C``. The first
    ``z_tilde`` is assigned to ``z`` and then recomputed with a fresh ``u``
    — presumably intentional for this experiment's RNG stream; confirm.

    Returns:
        (z, z_tilde, logprob)
    """
    cat = Categorical(logits=logits)
    # Clamp keeps log(-log(u)) finite.
    u = torch.rand(B,C).clamp(1e-8, 1.-1e-8)
    gumbels = -torch.log(-torch.log(u))  # standard Gumbel(0, 1) noise
    z = logits + gumbels
    b = samp #torch.argmax(z, dim=1)
    logprob = cat.log_prob(b).view(B,1)
    # Conditional coordinate for the given category b.
    u_b = torch.gather(input=u, dim=1, index=b.view(B,1))
    z_tilde_b = -torch.log(-torch.log(u_b))
    z_tilde = -torch.log((- torch.log(u) / torch.softmax(logits, dim=1)) - torch.log(u_b))
    z_tilde.scatter_(dim=1, index=b.view(B,1), src=z_tilde_b)
    z = z_tilde
    # Second pass: recompute z_tilde with a fresh uniform draw.
    u_b = torch.gather(input=u, dim=1, index=b.view(B,1))
    z_tilde_b = -torch.log(-torch.log(u_b))
    u = torch.rand(B,C).clamp(1e-8, 1.-1e-8)
    z_tilde = -torch.log((- torch.log(u) / torch.softmax(logits, dim=1)) - torch.log(u_b))
    z_tilde.scatter_(dim=1, index=b.view(B,1), src=z_tilde_b)
    return z, z_tilde, logprob
开发者ID:chriscremer,项目名称:Other_Code,代码行数:29,代码来源:plotting_cat_grads_dist.py
示例9: test_forward_runs_with_non_bijective_mapping
def test_forward_runs_with_non_bijective_mapping(self):
    """The embedder should run when ``embedder_to_indexer_map`` routes the
    'words' indexer output into two embedders (a non-bijective mapping).
    Only checks that the forward pass completes; no output assertions.
    """
    elmo_fixtures_path = self.FIXTURES_ROOT / 'elmo'
    options_file = str(elmo_fixtures_path / 'options.json')
    weight_file = str(elmo_fixtures_path / 'lm_weights.hdf5')
    params = Params({
        "token_embedders": {
            "words": {
                "type": "embedding",
                "num_embeddings": 20,
                "embedding_dim": 2,
            },
            "elmo": {
                "type": "elmo_token_embedder",
                "options_file": options_file,
                "weight_file": weight_file
            },
        },
        # 'elmo' consumes both its own indexer output and 'words'.
        "embedder_to_indexer_map": {"words": ["words"], "elmo": ["elmo", "words"]}
    })
    token_embedder = BasicTextFieldEmbedder.from_params(self.vocab, params)
    # Random integer ids within each embedder's vocabulary range.
    inputs = {
        'words': (torch.rand(3, 6) * 20).long(),
        'elmo': (torch.rand(3, 6, 50) * 15).long(),
    }
    token_embedder(inputs)
开发者ID:apmoore1,项目名称:allennlp,代码行数:25,代码来源:basic_text_field_embedder_test.py
示例10: grad2
def grad2():
    """Autograd demo: backpropagate two matmul chains y = x @ W @ W2
    through shared weights and print the input gradients."""
    weight_a = Variable(torch.rand(2, 2), requires_grad=True)
    weight_b = Variable(torch.rand(2, 1), requires_grad=True)
    in_one = Variable(torch.rand(1, 2), requires_grad=True)
    in_two = Variable(torch.rand(1, 2), requires_grad=True)
    for label, tensor in (("w: ", weight_a), ("x1: ", in_one), ("x2: ", in_two)):
        print(label)
        print(tensor)
    print("--------------------")
    out_one = torch.matmul(torch.matmul(in_one, weight_a), weight_b)
    # The combined weight product: d(out)/d(x) for either input.
    print(torch.matmul(weight_a, weight_b))
    out_one.backward()
    print(in_one.grad)
    out_two = torch.matmul(torch.matmul(in_two, weight_a), weight_b)
    out_two.backward()
    print(in_two.grad)
开发者ID:gonglixue,项目名称:PRML_Python,代码行数:32,代码来源:gradient.py
示例11: sample_relax
def sample_relax(probs):
    """RELAX-style sampler parameterized by probabilities rather than logits.

    NOTE(review): relies on module-level globals ``B``, ``C``, and ``cat``
    (a Categorical distribution — presumably built from ``probs``; confirm).

    Returns:
        (z, b, logprob, z_tilde)
    """
    #Sample z
    u = torch.rand(B,C)
    gumbels = -torch.log(-torch.log(u))  # standard Gumbel(0, 1) noise
    z = torch.log(probs) + gumbels
    # Gumbel-max trick: argmax of perturbed log-probs ~ Categorical(probs).
    b = torch.argmax(z, dim=1)
    logprob = cat.log_prob(b)
    #Sample z_tilde
    u_b = torch.rand(B,1)
    z_tilde_b = -torch.log(-torch.log(u_b))
    u = torch.rand(B,C)
    z_tilde = -torch.log((- torch.log(u) / probs) - torch.log(u_b))
    # print (z_tilde)
    # NOTE(review): with a tensor index ``b`` this assigns whole columns
    # for every row, unlike the per-row scatter_ used in the sibling
    # samplers — verify this is only correct when B == 1.
    z_tilde[:,b] = z_tilde_b
    # print (z_tilde)
    # fasdfasd
    # print (z)
    # print (b)
    # print (z_tilde)
    # print (logprob)
    # print (probs)
    # fsdfa
    return z, b, logprob, z_tilde
开发者ID:chriscremer,项目名称:Other_Code,代码行数:30,代码来源:is_pz_grad_dependent_on_theta_2.py
示例12: test_add_output_dim
def test_add_output_dim(self, cuda=False):
    """Check add_output_dim: reject inputs missing the original batch dims,
    and otherwise insert the output dimension at the expected index.

    Args:
        cuda: If True, place tensors on a CUDA device instead of CPU.
    """
    for double in (False, True):
        tkwargs = {
            "device": torch.device("cuda") if cuda else torch.device("cpu"),
            "dtype": torch.double if double else torch.float,
        }
        original_batch_shape = torch.Size([2])
        # check exception is raised (X lacks the original batch dims)
        X = torch.rand(2, 1, **tkwargs)
        with self.assertRaises(ValueError):
            add_output_dim(X=X, original_batch_shape=original_batch_shape)
        # test no new batch dims -> output dim inserted at index 0
        X = torch.rand(2, 2, 1, **tkwargs)
        X_out, output_dim_idx = add_output_dim(
            X=X, original_batch_shape=original_batch_shape
        )
        self.assertTrue(torch.equal(X_out, X.unsqueeze(0)))
        self.assertEqual(output_dim_idx, 0)
        # test new batch dims -> output dim inserted after them, at index 1
        X = torch.rand(3, 2, 2, 1, **tkwargs)
        X_out, output_dim_idx = add_output_dim(
            X=X, original_batch_shape=original_batch_shape
        )
        self.assertTrue(torch.equal(X_out, X.unsqueeze(1)))
        self.assertEqual(output_dim_idx, 1)
开发者ID:saschwan,项目名称:botorch,代码行数:25,代码来源:test_utils.py
示例13: run_test_argmax
def run_test_argmax():
    """Smoke-test TestArgMax: forward three random 4-vectors through the
    module and backpropagate the resulting loss."""
    module = TestArgMax()
    # Same draw order as (k, v, y): keys, values, targets.
    keys, values, targets = (torch.rand(4) for _ in range(3))
    module(keys, values, targets).backward()
开发者ID:tianzhiliang,项目名称:test,代码行数:7,代码来源:argmax_gradient.py
示例14: visualize_results
def visualize_results(self, epoch, fix=True):
    """Generate a grid of class-conditional generator samples and save it
    as a PNG.

    Args:
        epoch: Epoch number, embedded in the output file name.
        fix: If True, reuse the fixed noise/labels ``self.sample_z_`` /
            ``self.sample_y_``; otherwise draw fresh noise and random
            one-hot labels over the 10 classes.
    """
    self.G.eval()
    if not os.path.exists(self.result_dir + '/' + self.dataset + '/' + self.model_name):
        os.makedirs(self.result_dir + '/' + self.dataset + '/' + self.model_name)
    # Largest square grid that fits within the sample count.
    image_frame_dim = int(np.floor(np.sqrt(self.sample_num)))
    if fix:
        """ fixed noise """
        samples = self.G(self.sample_z_, self.sample_y_)
    else:
        """ random noise """
        # Random class ids turned into one-hot label vectors.
        temp = torch.LongTensor(self.batch_size, 1).random_() % 10
        sample_y_ = torch.FloatTensor(self.batch_size, 10)
        sample_y_.zero_()
        sample_y_.scatter_(1, temp, 1)
        # NOTE(review): Variable(..., volatile=True) is the legacy
        # pre-0.4 way to disable autograd for inference.
        if self.gpu_mode:
            sample_z_, sample_y_ = Variable(torch.rand((self.batch_size, self.z_dim)).cuda(), volatile=True), \
                Variable(sample_y_.cuda(), volatile=True)
        else:
            sample_z_, sample_y_ = Variable(torch.rand((self.batch_size, self.z_dim)), volatile=True), \
                Variable(sample_y_, volatile=True)
        samples = self.G(sample_z_, sample_y_)
    # NCHW -> NHWC for image saving.
    if self.gpu_mode:
        samples = samples.cpu().data.numpy().transpose(0, 2, 3, 1)
    else:
        samples = samples.data.numpy().transpose(0, 2, 3, 1)
    utils.save_images(samples[:image_frame_dim * image_frame_dim, :, :, :], [image_frame_dim, image_frame_dim],
                      self.result_dir + '/' + self.dataset + '/' + self.model_name + '/' + self.model_name + '_epoch%03d' % epoch + '.png')
开发者ID:zbxzc35,项目名称:pytorch-generative-model-collections,代码行数:33,代码来源:ACGAN.py
示例15: setUp
def setUp(self):
    """Build fixtures: 3-filter 2x2 inputs (batch sizes 1 and 2) with a
    few entries bumped above 1.0, random gradients of matching shape, and
    a uniform duty cycle of 1/3 per filter."""
    # Batch size 1
    single = torch.ones((1, 3, 2, 2))
    for idx, val in (((0, 0, 1, 0), 1.1), ((0, 0, 1, 1), 1.2),
                     ((0, 1, 0, 1), 1.2), ((0, 2, 1, 0), 1.3)):
        single[idx] = val
    self.x = single
    self.gradient = torch.rand(single.shape)
    # Batch size 2: same bumps in item 0, plus extra bumps in item 1.
    double = torch.ones((2, 3, 2, 2))
    for idx, val in (((0, 0, 1, 0), 1.1), ((0, 0, 1, 1), 1.2),
                     ((0, 1, 0, 1), 1.2), ((0, 2, 1, 0), 1.3),
                     ((1, 0, 0, 0), 1.4), ((1, 1, 0, 0), 1.5),
                     ((1, 1, 0, 1), 1.6), ((1, 2, 1, 1), 1.7)):
        double[idx] = val
    self.x2 = double
    self.gradient2 = torch.rand(double.shape)
    # All filters share an equal duty cycle.
    self.dutyCycle = torch.full((1, 3, 1, 1), 1.0 / 3.0)
开发者ID:rhyolight,项目名称:nupic.research,代码行数:29,代码来源:k_winners_cnn_test.py
示例16: get_loss
def get_loss(self, image_a_pred, image_b_pred, mask_a, mask_b):
    """Hinge-style loss between descriptors randomly sampled (with
    replacement) from the masked regions of two predicted descriptor images.

    Args:
        image_a_pred, image_b_pred: Descriptor images, indexed along dim 1
            by flattened pixel index.
        mask_a, mask_b: Masks whose nonzero entries select object pixels.

    Returns:
        Scalar loss tensor; a zero placeholder if either mask is empty.
        NOTE(review): CUDA-only (uses torch.cuda tensor types throughout).
    """
    loss = 0
    # get the nonzero indices
    mask_a_indices_flat = torch.nonzero(mask_a)
    mask_b_indices_flat = torch.nonzero(mask_b)
    # Degenerate masks: nothing to compare, return a zero "loss".
    if len(mask_a_indices_flat) == 0:
        return Variable(torch.cuda.LongTensor([0]), requires_grad=True)
    if len(mask_b_indices_flat) == 0:
        return Variable(torch.cuda.LongTensor([0]), requires_grad=True)
    # take num_samples random pixel samples of the object, using the mask
    num_samples = 10000
    rand_numbers_a = (torch.rand(num_samples)*len(mask_a_indices_flat)).cuda()
    rand_indices_a = Variable(torch.floor(rand_numbers_a).type(torch.cuda.LongTensor), requires_grad=False)
    randomized_mask_a_indices_flat = torch.index_select(mask_a_indices_flat, 0, rand_indices_a).squeeze(1)
    rand_numbers_b = (torch.rand(num_samples)*len(mask_b_indices_flat)).cuda()
    rand_indices_b = Variable(torch.floor(rand_numbers_b).type(torch.cuda.LongTensor), requires_grad=False)
    randomized_mask_b_indices_flat = torch.index_select(mask_b_indices_flat, 0, rand_indices_b).squeeze(1)
    # index into the image and get descriptors
    M_margin = 0.5 # margin parameter
    random_img_a_object_descriptors = torch.index_select(image_a_pred, 1, randomized_mask_a_indices_flat)
    random_img_b_object_descriptors = torch.index_select(image_b_pred, 1, randomized_mask_b_indices_flat)
    # Squared L2 distance per sampled pair, hinged at 2 * M_margin.
    pixel_wise_loss = (random_img_a_object_descriptors - random_img_b_object_descriptors).pow(2).sum(dim=2)
    pixel_wise_loss = torch.add(pixel_wise_loss, -2*M_margin)
    zeros_vec = torch.zeros_like(pixel_wise_loss)
    loss += torch.max(zeros_vec, pixel_wise_loss).sum()
    return loss
开发者ID:shooter2062424,项目名称:pytorch-dense-correspondence,代码行数:32,代码来源:semantic_consistency_loss.py
示例17: test_degenerate_GPyTorchPosterior
def test_degenerate_GPyTorchPosterior(self, cuda=False):
    """GPyTorchPosterior backed by a singular (not positive definite)
    covariance: checks basic properties, rsample with and without base
    samples, the "not p.d." RuntimeWarning, and batched use.

    Args:
        cuda: If True, place tensors on a CUDA device instead of CPU.
    """
    device = torch.device("cuda") if cuda else torch.device("cpu")
    for dtype in (torch.float, torch.double):
        # singular covariance matrix (first two rows are identical)
        degenerate_covar = torch.tensor(
            [[1, 1, 0], [1, 1, 0], [0, 0, 2]], dtype=dtype, device=device
        )
        mean = torch.rand(3, dtype=dtype, device=device)
        mvn = MultivariateNormal(mean, lazify(degenerate_covar))
        posterior = GPyTorchPosterior(mvn=mvn)
        # basics
        self.assertEqual(posterior.device.type, device.type)
        self.assertTrue(posterior.dtype == dtype)
        self.assertEqual(posterior.event_shape, torch.Size([3, 1]))
        self.assertTrue(torch.equal(posterior.mean, mean.unsqueeze(-1)))
        variance_exp = degenerate_covar.diag().unsqueeze(-1)
        self.assertTrue(torch.equal(posterior.variance, variance_exp))
        # rsample
        with warnings.catch_warnings(record=True) as w:
            # we check that the p.d. warning is emitted - this only
            # happens once per posterior, so we need to check only once
            samples = posterior.rsample(sample_shape=torch.Size([4]))
            self.assertEqual(len(w), 1)
            self.assertTrue(issubclass(w[-1].category, RuntimeWarning))
            self.assertTrue("not p.d." in str(w[-1].message))
        self.assertEqual(samples.shape, torch.Size([4, 3, 1]))
        samples2 = posterior.rsample(sample_shape=torch.Size([4, 2]))
        self.assertEqual(samples2.shape, torch.Size([4, 2, 3, 1]))
        # rsample w/ base samples: identical base samples -> identical draws
        base_samples = torch.randn(4, 3, 1, device=device, dtype=dtype)
        samples_b1 = posterior.rsample(
            sample_shape=torch.Size([4]), base_samples=base_samples
        )
        samples_b2 = posterior.rsample(
            sample_shape=torch.Size([4]), base_samples=base_samples
        )
        self.assertTrue(torch.allclose(samples_b1, samples_b2))
        base_samples2 = torch.randn(4, 2, 3, 1, device=device, dtype=dtype)
        samples2_b1 = posterior.rsample(
            sample_shape=torch.Size([4, 2]), base_samples=base_samples2
        )
        samples2_b2 = posterior.rsample(
            sample_shape=torch.Size([4, 2]), base_samples=base_samples2
        )
        self.assertTrue(torch.allclose(samples2_b1, samples2_b2))
        # collapse_batch_dims
        b_mean = torch.rand(2, 3, dtype=dtype, device=device)
        b_degenerate_covar = degenerate_covar.expand(2, *degenerate_covar.shape)
        b_mvn = MultivariateNormal(b_mean, lazify(b_degenerate_covar))
        b_posterior = GPyTorchPosterior(mvn=b_mvn)
        b_base_samples = torch.randn(4, 2, 3, 1, device=device, dtype=dtype)
        with warnings.catch_warnings(record=True) as w:
            b_samples = b_posterior.rsample(
                sample_shape=torch.Size([4]), base_samples=b_base_samples
            )
            self.assertEqual(len(w), 1)
            self.assertTrue(issubclass(w[-1].category, RuntimeWarning))
            self.assertTrue("not p.d." in str(w[-1].message))
        self.assertEqual(b_samples.shape, torch.Size([4, 2, 3, 1]))
开发者ID:saschwan,项目名称:botorch,代码行数:60,代码来源:test_gpytorch.py
示例18: test_upper_confidence_bound
def test_upper_confidence_bound(self, cuda=False):
    """UpperConfidenceBound on a mock model: expected values for both
    maximize=True and maximize=False, and the multi-output error case.

    Args:
        cuda: If True, place tensors on a CUDA device instead of CPU.
    """
    device = torch.device("cuda") if cuda else torch.device("cpu")
    for dtype in (torch.float, torch.double):
        mean = torch.tensor([[0.0]], device=device, dtype=dtype)
        variance = torch.tensor([[1.0]], device=device, dtype=dtype)
        mm = MockModel(MockPosterior(mean=mean, variance=variance))
        module = UpperConfidenceBound(model=mm, beta=1.0)
        X = torch.zeros(1, 1, device=device, dtype=dtype)
        ucb = module(X)
        # mean 0, variance 1, beta 1 -> expected UCB of 1.0 (maximizing)
        ucb_expected = torch.tensor([1.0], device=device, dtype=dtype)
        self.assertTrue(torch.allclose(ucb, ucb_expected, atol=1e-4))
        # minimizing flips the sign of the expected value
        module = UpperConfidenceBound(model=mm, beta=1.0, maximize=False)
        X = torch.zeros(1, 1, device=device, dtype=dtype)
        ucb = module(X)
        ucb_expected = torch.tensor([-1.0], device=device, dtype=dtype)
        self.assertTrue(torch.allclose(ucb, ucb_expected, atol=1e-4))
        # check for proper error if multi-output model
        mean2 = torch.rand(1, 2, device=device, dtype=dtype)
        variance2 = torch.rand(1, 2, device=device, dtype=dtype)
        mm2 = MockModel(MockPosterior(mean=mean2, variance=variance2))
        module2 = UpperConfidenceBound(model=mm2, beta=1.0)
        with self.assertRaises(UnsupportedError):
            module2(X)
开发者ID:saschwan,项目名称:botorch,代码行数:26,代码来源:test_analytic.py
示例19: test_fit_valid_sets_args
def test_fit_valid_sets_args(self, gtvs):
    """Model.fit should forward x, y, validation_data, validation_split and
    shuffle to the train/validation-split helper.

    Args:
        gtvs: Mock injected for the split helper (presumably via a @patch
            decorator on the enclosing test class — confirm).
    """
    x = torch.rand(1,5)
    y = torch.rand(1,5)
    val_data = (1,2)
    val_split = 0.2
    shuffle = False
    # Minimal mocked model/optimizer/criterion so fit() can run.
    torchmodel = MagicMock()
    torchmodel.forward = Mock(return_value=1)
    optimizer = MagicMock()
    metric = Metric('test')
    loss = torch.tensor([2], requires_grad=True)
    criterion = Mock(return_value=loss)
    gtvs.return_value = (1, 2)
    torchbearermodel = Model(torchmodel, optimizer, criterion, [metric])
    # Stub fit_generator so fit() stops after preparing its arguments.
    torchbearermodel.fit_generator = Mock()
    torchbearermodel.fit(x, y, 1, validation_data=val_data, validation_split=val_split, shuffle=shuffle)
    gtvs.assert_called_once()
    # Positional args: x, y, validation_data, validation_split; kwarg: shuffle.
    self.assertTrue(list(gtvs.call_args[0][0].numpy()[0]) == list(x.numpy()[0]))
    self.assertTrue(list(gtvs.call_args[0][1].numpy()[0]) == list(y.numpy()[0]))
    self.assertTrue(gtvs.call_args[0][2] == val_data)
    self.assertTrue(gtvs.call_args[0][3] == val_split)
    self.assertTrue(gtvs.call_args[1]['shuffle'] == shuffle)
开发者ID:little1tow,项目名称:torchbearer,代码行数:27,代码来源:test_torchbearer.py
示例20: test_sequential_scorer_d4_3
def test_sequential_scorer_d4_3():
    """SequentialScorer forward on two random embeddings with two string
    features should reproduce a fixed score (RNG is seeded)."""
    global test_doc
    torch.manual_seed(1)
    seq = SequentialScorer(TEST_EMBEDDING_DIM, min_features, 2, COREF_FF_HIDDEN)
    emb5 = ag.Variable(torch.rand(1, TEST_EMBEDDING_DIM))
    emb0 = ag.Variable(torch.rand(1, TEST_EMBEDDING_DIM))
    pred = float(seq(emb5, emb0, ['exact-match', 'last-token-match']))
    # Expected value pinned from a seeded reference run.
    assert_almost_equals(pred, -0.359851, places=4)
开发者ID:cedebrun,项目名称:gt-nlp-class,代码行数:8,代码来源:test_neural_coref.py
注:本文中的torch.rand函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论