This article collects typical usage examples of the Python function tensorflow.python.ops.distributions.kullback_leibler.kl_divergence. If you are wondering what kl_divergence does, how to call it, and what real-world usage looks like, the curated code samples below should help.
Twenty code examples of kl_divergence are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code samples.
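Before the project examples, here is a minimal, self-contained usage sketch added for orientation only (it is not taken from any of the projects below and assumes a TF 1.x graph-mode environment in which the internal tensorflow.python.ops.distributions modules are importable):

import tensorflow as tf
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.ops.distributions import normal

# Two univariate Normal distributions.
p = normal.Normal(loc=0., scale=1.)
q = normal.Normal(loc=1., scale=2.)

# Analytic KL(p || q); kl_divergence looks up the registered Normal/Normal rule.
kl = kullback_leibler.kl_divergence(p, q)

with tf.Session() as sess:
  print(sess.run(kl))  # ~0.4431 = log(2) + (1 + 1) / (2 * 4) - 0.5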
Example 1: testDomainErrorExceptions
def testDomainErrorExceptions(self):

  class MyDistException(normal.Normal):
    pass

  # Register a KL implementation that returns NaN, to exercise the error path.
  @kullback_leibler.RegisterKL(MyDistException, MyDistException)
  # pylint: disable=unused-argument,unused-variable
  def _kl(a, b, name=None):
    return array_ops.identity([float("nan")])
  # pylint: enable=unused-argument,unused-variable

  with self.cached_session():
    a = MyDistException(loc=0.0, scale=1.0, allow_nan_stats=False)
    kl = kullback_leibler.kl_divergence(a, a, allow_nan_stats=False)
    with self.assertRaisesOpError(
        "KL calculation between .* and .* returned NaN values"):
      self.evaluate(kl)
    with self.assertRaisesOpError(
        "KL calculation between .* and .* returned NaN values"):
      a.kl_divergence(a).eval()
    a = MyDistException(loc=0.0, scale=1.0, allow_nan_stats=True)
    kl_ok = kullback_leibler.kl_divergence(a, a)
    self.assertAllEqual([float("nan")], self.evaluate(kl_ok))
    self_kl_ok = a.kl_divergence(a)
    self.assertAllEqual([float("nan")], self.evaluate(self_kl_ok))
    cross_ok = a.cross_entropy(a)
    self.assertAllEqual([float("nan")], self.evaluate(cross_ok))
Contributor: JonathanRaiman, Project: tensorflow, Lines: 29, Source: kullback_leibler_test.py
Example 2: testKLRaises
def testKLRaises(self):
  ind1 = independent_lib.Independent(
      distribution=normal_lib.Normal(
          loc=np.float32([-1., 1]),
          scale=np.float32([0.1, 0.5])),
      reinterpreted_batch_ndims=1)
  ind2 = independent_lib.Independent(
      distribution=normal_lib.Normal(
          loc=np.float32(-1),
          scale=np.float32(0.5)),
      reinterpreted_batch_ndims=0)

  with self.assertRaisesRegexp(
      ValueError, "Event shapes do not match"):
    kullback_leibler.kl_divergence(ind1, ind2)

  ind1 = independent_lib.Independent(
      distribution=normal_lib.Normal(
          loc=np.float32([-1., 1]),
          scale=np.float32([0.1, 0.5])),
      reinterpreted_batch_ndims=1)
  ind2 = independent_lib.Independent(
      distribution=mvn_diag_lib.MultivariateNormalDiag(
          loc=np.float32([-1., 1]),
          scale_diag=np.float32([0.1, 0.5])),
      reinterpreted_batch_ndims=0)

  with self.assertRaisesRegexp(
      NotImplementedError, "different event shapes"):
    kullback_leibler.kl_divergence(ind1, ind2)
Contributor: AndrewTwinz, Project: tensorflow, Lines: 30, Source: independent_test.py
Example 3: testBetaBetaKL
def testBetaBetaKL(self):
  with self.test_session() as sess:
    for shape in [(10,), (4, 5)]:
      a1 = 6.0 * np.random.random(size=shape) + 1e-4
      b1 = 6.0 * np.random.random(size=shape) + 1e-4
      a2 = 6.0 * np.random.random(size=shape) + 1e-4
      b2 = 6.0 * np.random.random(size=shape) + 1e-4

      # Take inverse softplus of values to test BetaWithSoftplusConcentration
      a1_sp = np.log(np.exp(a1) - 1.0)
      b1_sp = np.log(np.exp(b1) - 1.0)
      a2_sp = np.log(np.exp(a2) - 1.0)
      b2_sp = np.log(np.exp(b2) - 1.0)

      d1 = beta_lib.Beta(concentration1=a1, concentration0=b1)
      d2 = beta_lib.Beta(concentration1=a2, concentration0=b2)
      d1_sp = beta_lib.BetaWithSoftplusConcentration(concentration1=a1_sp,
                                                     concentration0=b1_sp)
      d2_sp = beta_lib.BetaWithSoftplusConcentration(concentration1=a2_sp,
                                                     concentration0=b2_sp)

      kl_expected = (special.betaln(a2, b2) - special.betaln(a1, b1) +
                     (a1 - a2) * special.digamma(a1) +
                     (b1 - b2) * special.digamma(b1) +
                     (a2 - a1 + b2 - b1) * special.digamma(a1 + b1))

      for dist1 in [d1, d1_sp]:
        for dist2 in [d2, d2_sp]:
          kl = kullback_leibler.kl_divergence(dist1, dist2)
          kl_val = sess.run(kl)
          self.assertEqual(kl.get_shape(), shape)
          self.assertAllClose(kl_val, kl_expected)

      # Make sure KL(d1||d1) is 0
      kl_same = sess.run(kullback_leibler.kl_divergence(d1, d1))
      self.assertAllClose(kl_same, np.zeros_like(kl_expected))
Contributor: jzuern, Project: tensorflow, Lines: 35, Source: beta_test.py
Example 4: testDirichletDirichletKL
def testDirichletDirichletKL(self):
  conc1 = np.array([[1., 2., 3., 1.5, 2.5, 3.5],
                    [1.5, 2.5, 3.5, 4.5, 5.5, 6.5]])
  conc2 = np.array([[0.5, 1., 1.5, 2., 2.5, 3.]])

  d1 = dirichlet_lib.Dirichlet(conc1)
  d2 = dirichlet_lib.Dirichlet(conc2)
  x = d1.sample(int(1e4), seed=0)
  kl_sample = math_ops.reduce_mean(d1.log_prob(x) - d2.log_prob(x), 0)
  kl_actual = kullback_leibler.kl_divergence(d1, d2)

  kl_sample_val = self.evaluate(kl_sample)
  kl_actual_val = self.evaluate(kl_actual)

  self.assertEqual(conc1.shape[:-1], kl_actual.get_shape())

  if not special:
    return

  kl_expected = (
      special.gammaln(np.sum(conc1, -1))
      - special.gammaln(np.sum(conc2, -1))
      - np.sum(special.gammaln(conc1) - special.gammaln(conc2), -1)
      + np.sum((conc1 - conc2) * (special.digamma(conc1) - special.digamma(
          np.sum(conc1, -1, keepdims=True))), -1))

  self.assertAllClose(kl_expected, kl_actual_val, atol=0., rtol=1e-6)
  self.assertAllClose(kl_sample_val, kl_actual_val, atol=0., rtol=1e-1)

  # Make sure KL(d1||d1) is 0
  kl_same = self.evaluate(kullback_leibler.kl_divergence(d1, d1))
  self.assertAllClose(kl_same, np.zeros_like(kl_expected))
Contributor: LiuCKind, Project: tensorflow, Lines: 32, Source: dirichlet_test.py
Example 5: testCategoricalCategoricalKL
def testCategoricalCategoricalKL(self):

  def np_softmax(logits):
    exp_logits = np.exp(logits)
    return exp_logits / exp_logits.sum(axis=-1, keepdims=True)

  with self.cached_session() as sess:
    for categories in [2, 10]:
      for batch_size in [1, 2]:
        p_logits = self._rng.random_sample((batch_size, categories))
        q_logits = self._rng.random_sample((batch_size, categories))
        p = onehot_categorical.OneHotCategorical(logits=p_logits)
        q = onehot_categorical.OneHotCategorical(logits=q_logits)
        prob_p = np_softmax(p_logits)
        prob_q = np_softmax(q_logits)
        kl_expected = np.sum(
            prob_p * (np.log(prob_p) - np.log(prob_q)), axis=-1)

        kl_actual = kullback_leibler.kl_divergence(p, q)
        kl_same = kullback_leibler.kl_divergence(p, p)
        x = p.sample(int(2e4), seed=0)
        x = math_ops.cast(x, dtype=dtypes.float32)
        # Compute empirical KL(p||q).
        kl_sample = math_ops.reduce_mean(p.log_prob(x) - q.log_prob(x), 0)

        [kl_sample_, kl_actual_, kl_same_] = sess.run([kl_sample, kl_actual,
                                                       kl_same])

        self.assertEqual(kl_actual.get_shape(), (batch_size,))
        self.assertAllClose(kl_same_, np.zeros_like(kl_expected))
        self.assertAllClose(kl_actual_, kl_expected, atol=0., rtol=1e-6)
        self.assertAllClose(kl_sample_, kl_expected, atol=1e-2, rtol=0.)
Contributor: Ajaycs99, Project: tensorflow, Lines: 30, Source: onehot_categorical_test.py
Example 6: testCategoricalCategoricalKL
def testCategoricalCategoricalKL(self):

  def np_softmax(logits):
    exp_logits = np.exp(logits)
    return exp_logits / exp_logits.sum(axis=-1, keepdims=True)

  with self.cached_session() as sess:
    for categories in [2, 4]:
      for batch_size in [1, 10]:
        a_logits = np.random.randn(batch_size, categories)
        b_logits = np.random.randn(batch_size, categories)

        a = categorical.Categorical(logits=a_logits)
        b = categorical.Categorical(logits=b_logits)

        kl = kullback_leibler.kl_divergence(a, b)
        kl_val = sess.run(kl)
        # Make sure KL(a||a) is 0
        kl_same = sess.run(kullback_leibler.kl_divergence(a, a))

        prob_a = np_softmax(a_logits)
        prob_b = np_softmax(b_logits)
        kl_expected = np.sum(prob_a * (np.log(prob_a) - np.log(prob_b)),
                             axis=-1)

        self.assertEqual(kl.get_shape(), (batch_size,))
        self.assertAllClose(kl_val, kl_expected)
        self.assertAllClose(kl_same, np.zeros_like(kl_expected))
Contributor: AnishShah, Project: tensorflow, Lines: 28, Source: categorical_test.py
Example 7: _kl_independent
def _kl_independent(a, b, name="kl_independent"):
  """Batched KL divergence `KL(a || b)` for Independent distributions.

  We can leverage the fact that

  ```
  KL(Independent(a) || Independent(b)) = sum(KL(a || b))
  ```

  where the sum is over the `reinterpreted_batch_ndims`.

  Args:
    a: Instance of `Independent`.
    b: Instance of `Independent`.
    name: (optional) name to use for created ops. Default "kl_independent".

  Returns:
    Batchwise `KL(a || b)`.

  Raises:
    ValueError: If the event space for `a` and `b`, or their underlying
      distributions don't match.
  """
  p = a.distribution
  q = b.distribution

  # The KL between any two (non)-batched distributions is a scalar.
  # Given that the KL between two factored distributions is the sum, i.e.
  # KL(p1(x)p2(y) || q1(x)q2(y)) = KL(p1 || q1) + KL(p2 || q2), we compute
  # KL(p || q) and do a `reduce_sum` on the reinterpreted batch dimensions.
  if a.event_shape.is_fully_defined() and b.event_shape.is_fully_defined():
    if a.event_shape == b.event_shape:
      if p.event_shape == q.event_shape:
        num_reduce_dims = a.event_shape.ndims - p.event_shape.ndims
        reduce_dims = [-i - 1 for i in range(0, num_reduce_dims)]
        return math_ops.reduce_sum(
            kullback_leibler.kl_divergence(p, q, name=name), axis=reduce_dims)
      else:
        raise NotImplementedError("KL between Independents with different "
                                  "event shapes not supported.")
    else:
      raise ValueError("Event shapes do not match.")
  else:
    with ops.control_dependencies([
        check_ops.assert_equal(a.event_shape_tensor(), b.event_shape_tensor()),
        check_ops.assert_equal(p.event_shape_tensor(), q.event_shape_tensor())
    ]):
      # Number of event dimensions that were reinterpreted from batch dims.
      num_reduce_dims = (
          array_ops.shape(a.event_shape_tensor())[0] -
          array_ops.shape(p.event_shape_tensor())[0])
      reduce_dims = math_ops.range(-num_reduce_dims, 0, 1)
      return math_ops.reduce_sum(
          kullback_leibler.kl_divergence(p, q, name=name), axis=reduce_dims)
Contributor: didukhle, Project: tensorflow, Lines: 52, Source: independent.py
Example 8: test_kl_reverse_multidim
def test_kl_reverse_multidim(self):
  with self.test_session() as sess:
    d = 5  # Dimension
    p = mvn_full_lib.MultivariateNormalFullCovariance(
        covariance_matrix=self._tridiag(d, diag_value=1, offdiag_value=0.5))
    q = mvn_diag_lib.MultivariateNormalDiag(scale_diag=[0.5] * d)

    approx_kl = cd.monte_carlo_csiszar_f_divergence(
        f=cd.kl_reverse,
        p=p,
        q=q,
        num_draws=int(1e5),
        seed=1)

    approx_kl_self_normalized = cd.monte_carlo_csiszar_f_divergence(
        f=lambda logu: cd.kl_reverse(logu, self_normalized=True),
        p=p,
        q=q,
        num_draws=int(1e5),
        seed=1)

    exact_kl = kullback_leibler.kl_divergence(q, p)

    [approx_kl_, approx_kl_self_normalized_, exact_kl_] = sess.run([
        approx_kl, approx_kl_self_normalized, exact_kl])

    self.assertAllClose(approx_kl_, exact_kl_,
                        rtol=0.02, atol=0.)
    self.assertAllClose(approx_kl_self_normalized_, exact_kl_,
                        rtol=0.08, atol=0.)
Contributor: AutumnQYN, Project: tensorflow, Lines: 34, Source: csiszar_divergence_test.py
Example 9: testGammaGammaKL
def testGammaGammaKL(self):
  alpha0 = np.array([3.])
  beta0 = np.array([1., 2., 3., 1.5, 2.5, 3.5])
  alpha1 = np.array([0.4])
  beta1 = np.array([0.5, 1., 1.5, 2., 2.5, 3.])

  # Build graph.
  with self.test_session() as sess:
    g0 = gamma_lib.Gamma(concentration=alpha0, rate=beta0)
    g1 = gamma_lib.Gamma(concentration=alpha1, rate=beta1)
    x = g0.sample(int(1e4), seed=0)
    kl_sample = math_ops.reduce_mean(g0.log_prob(x) - g1.log_prob(x), 0)
    kl_actual = kullback_leibler.kl_divergence(g0, g1)

    # Execute graph.
    [kl_sample_, kl_actual_] = sess.run([kl_sample, kl_actual])

    kl_expected = ((alpha0 - alpha1) * special.digamma(alpha0)
                   + special.gammaln(alpha1)
                   - special.gammaln(alpha0)
                   + alpha1 * np.log(beta0)
                   - alpha1 * np.log(beta1)
                   + alpha0 * (beta1 / beta0 - 1.))

    self.assertEqual(beta0.shape, kl_actual.get_shape())
    self.assertAllClose(kl_expected, kl_actual_, atol=0., rtol=1e-6)
    self.assertAllClose(kl_sample_, kl_actual_, atol=0., rtol=1e-2)
Contributor: jzuern, Project: tensorflow, Lines: 28, Source: gamma_test.py
Example 10: test_kl_reverse
def test_kl_reverse(self):
  with self.test_session() as sess:
    q = normal_lib.Normal(
        loc=np.ones(6),
        scale=np.array([0.5, 1.0, 1.5, 2.0, 2.5, 3.0]))
    p = normal_lib.Normal(loc=q.loc + 0.1, scale=q.scale - 0.2)

    approx_kl = cd.monte_carlo_csiszar_f_divergence(
        f=cd.kl_reverse,
        p=p,
        q=q,
        num_draws=int(1e5),
        seed=1)

    approx_kl_self_normalized = cd.monte_carlo_csiszar_f_divergence(
        f=lambda logu: cd.kl_reverse(logu, self_normalized=True),
        p=p,
        q=q,
        num_draws=int(1e5),
        seed=1)

    exact_kl = kullback_leibler.kl_divergence(q, p)

    [approx_kl_, approx_kl_self_normalized_, exact_kl_] = sess.run([
        approx_kl, approx_kl_self_normalized, exact_kl])

    self.assertAllClose(approx_kl_, exact_kl_,
                        rtol=0.07, atol=0.)
    self.assertAllClose(approx_kl_self_normalized_, exact_kl_,
                        rtol=0.02, atol=0.)
Contributor: AutumnQYN, Project: tensorflow, Lines: 33, Source: csiszar_divergence_test.py
Example 11: test_convergence_to_kl_using_sample_form_on_3dim_normal
def test_convergence_to_kl_using_sample_form_on_3dim_normal(self):
  # Test that the sample mean KL is the same as analytic when we use samples
  # to estimate every part of the KL divergence ratio.
  vector_shape = (2, 3)
  n_samples = 5000

  with self.test_session():
    q = mvn_diag_lib.MultivariateNormalDiag(
        loc=self._rng.rand(*vector_shape),
        scale_diag=self._rng.rand(*vector_shape))
    p = mvn_diag_lib.MultivariateNormalDiag(
        loc=self._rng.rand(*vector_shape),
        scale_diag=self._rng.rand(*vector_shape))

    # In this case, the log_ratio is the KL.
    sample_kl = -1 * entropy.elbo_ratio(
        log_p=p.log_prob,
        q=q,
        n=n_samples,
        form=entropy.ELBOForms.sample,
        seed=42)
    actual_kl = kullback_leibler_lib.kl_divergence(q, p)

    # Relative tolerance (rtol) chosen 2 times as large as minimum needed to
    # pass.
    self.assertEqual((2,), sample_kl.get_shape())
    self.assertAllClose(actual_kl.eval(), sample_kl.eval(), rtol=0.05)
Contributor: 1000sprites, Project: tensorflow, Lines: 27, Source: entropy_test.py
Example 12: testDefaultVariationalAndPrior
def testDefaultVariationalAndPrior(self):
  _, prior, variational, _, log_likelihood = mini_vae()
  elbo = vi.elbo(log_likelihood)
  expected_elbo = log_likelihood - kullback_leibler.kl_divergence(
      variational.distribution, prior)
  with self.test_session() as sess:
    sess.run(variables.global_variables_initializer())
    self.assertAllEqual(*sess.run([expected_elbo, elbo]))
Contributor: 1000sprites, Project: tensorflow, Lines: 8, Source: variational_inference_test.py
Example 13: testKLScalarToMultivariate
def testKLScalarToMultivariate(self):
  normal1 = normal_lib.Normal(
      loc=np.float32([-1., 1]),
      scale=np.float32([0.1, 0.5]))
  ind1 = independent_lib.Independent(
      distribution=normal1, reinterpreted_batch_ndims=1)

  normal2 = normal_lib.Normal(
      loc=np.float32([-3., 3]),
      scale=np.float32([0.3, 0.3]))
  ind2 = independent_lib.Independent(
      distribution=normal2, reinterpreted_batch_ndims=1)

  normal_kl = kullback_leibler.kl_divergence(normal1, normal2)
  ind_kl = kullback_leibler.kl_divergence(ind1, ind2)
  self.assertAllClose(
      self.evaluate(math_ops.reduce_sum(normal_kl, axis=-1)),
      self.evaluate(ind_kl))
Contributor: AndrewTwinz, Project: tensorflow, Lines: 18, Source: independent_test.py
Example 14: testExplicitVariationalAndPrior
def testExplicitVariationalAndPrior(self):
  with self.test_session() as sess:
    _, _, variational, _, log_likelihood = mini_vae()
    prior = normal.Normal(loc=3., scale=2.)
    elbo = vi.elbo(
        log_likelihood, variational_with_prior={variational: prior})
    expected_elbo = log_likelihood - kullback_leibler.kl_divergence(
        variational.distribution, prior)

    sess.run(variables.global_variables_initializer())
    self.assertAllEqual(*sess.run([expected_elbo, elbo]))
Contributor: 1000sprites, Project: tensorflow, Lines: 10, Source: variational_inference_test.py
Example 15: testRegistration
def testRegistration(self):

  class MyDist(normal.Normal):
    pass

  # Register a KL implementation that simply returns the `name` parameter.
  @kullback_leibler.RegisterKL(MyDist, MyDist)
  def _kl(a, b, name=None):  # pylint: disable=unused-argument,unused-variable
    return name

  a = MyDist(loc=0.0, scale=1.0)
  self.assertEqual("OK", kullback_leibler.kl_divergence(a, a, name="OK"))
Contributor: 1000sprites, Project: tensorflow, Lines: 12, Source: kullback_leibler_test.py
Example 16: testKLMultivariateToMultivariate
def testKLMultivariateToMultivariate(self):
  # (1, 1, 2) batch of MVNDiag
  mvn1 = mvn_diag_lib.MultivariateNormalDiag(
      loc=np.float32([[[[-1., 1, 3.], [2., 4., 3.]]]]),
      scale_diag=np.float32([[[0.2, 0.1, 5.], [2., 3., 4.]]]))
  ind1 = independent_lib.Independent(
      distribution=mvn1, reinterpreted_batch_ndims=2)

  # (1, 1, 2) batch of MVNDiag
  mvn2 = mvn_diag_lib.MultivariateNormalDiag(
      loc=np.float32([[[[-2., 3, 2.], [1., 3., 2.]]]]),
      scale_diag=np.float32([[[0.1, 0.5, 3.], [1., 2., 1.]]]))
  ind2 = independent_lib.Independent(
      distribution=mvn2, reinterpreted_batch_ndims=2)

  mvn_kl = kullback_leibler.kl_divergence(mvn1, mvn2)
  ind_kl = kullback_leibler.kl_divergence(ind1, ind2)
  self.assertAllClose(
      self.evaluate(math_ops.reduce_sum(mvn_kl, axis=[-1, -2])),
      self.evaluate(ind_kl))
Contributor: AndrewTwinz, Project: tensorflow, Lines: 21, Source: independent_test.py
Example 17: testKLIdentity
def testKLIdentity(self):
  normal1 = normal_lib.Normal(
      loc=np.float32([-1., 1]),
      scale=np.float32([0.1, 0.5]))
  # This is functionally just a wrapper around normal1,
  # and doesn't change any outputs.
  ind1 = independent_lib.Independent(
      distribution=normal1, reinterpreted_batch_ndims=0)

  normal2 = normal_lib.Normal(
      loc=np.float32([-3., 3]),
      scale=np.float32([0.3, 0.3]))
  # This is functionally just a wrapper around normal2,
  # and doesn't change any outputs.
  ind2 = independent_lib.Independent(
      distribution=normal2, reinterpreted_batch_ndims=0)

  normal_kl = kullback_leibler.kl_divergence(normal1, normal2)
  ind_kl = kullback_leibler.kl_divergence(ind1, ind2)
  self.assertAllClose(
      self.evaluate(normal_kl), self.evaluate(ind_kl))
Contributor: AndrewTwinz, Project: tensorflow, Lines: 21, Source: independent_test.py
Example 18: testIndirectRegistration
def testIndirectRegistration(self):

  class Sub1(normal.Normal):
    pass

  class Sub2(normal.Normal):
    pass

  class Sub11(Sub1):
    pass

  # pylint: disable=unused-argument,unused-variable
  @kullback_leibler.RegisterKL(Sub1, Sub1)
  def _kl11(a, b, name=None):
    return "sub1-1"

  @kullback_leibler.RegisterKL(Sub1, Sub2)
  def _kl12(a, b, name=None):
    return "sub1-2"

  @kullback_leibler.RegisterKL(Sub2, Sub1)
  def _kl21(a, b, name=None):
    return "sub2-1"
  # pylint: enable=unused-argument,unused-variable

  sub1 = Sub1(loc=0.0, scale=1.0)
  sub2 = Sub2(loc=0.0, scale=1.0)
  sub11 = Sub11(loc=0.0, scale=1.0)

  self.assertEqual("sub1-1", kullback_leibler.kl_divergence(sub1, sub1))
  self.assertEqual("sub1-2", kullback_leibler.kl_divergence(sub1, sub2))
  self.assertEqual("sub2-1", kullback_leibler.kl_divergence(sub2, sub1))
  self.assertEqual("sub1-1", kullback_leibler.kl_divergence(sub11, sub11))
  self.assertEqual("sub1-1", kullback_leibler.kl_divergence(sub11, sub1))
  self.assertEqual("sub1-2", kullback_leibler.kl_divergence(sub11, sub2))
  self.assertEqual("sub1-1", kullback_leibler.kl_divergence(sub11, sub1))
  self.assertEqual("sub1-2", kullback_leibler.kl_divergence(sub11, sub2))
  self.assertEqual("sub2-1", kullback_leibler.kl_divergence(sub2, sub11))
  self.assertEqual("sub1-1", kullback_leibler.kl_divergence(sub1, sub11))
Contributor: 1000sprites, Project: tensorflow, Lines: 40, Source: kullback_leibler_test.py
Example 19: __init__
def __init__(
    self,
    units,
    activation=None,
    activity_regularizer=None,
    trainable=True,
    kernel_use_local_reparameterization=True,
    kernel_posterior_fn=default_mean_field_normal_fn(),
    kernel_posterior_tensor_fn=lambda d: d.sample(),
    kernel_prior_fn=lambda dtype, *args: normal_lib.Normal(  # pylint: disable=g-long-lambda
        loc=dtype.as_numpy_dtype(0.), scale=dtype.as_numpy_dtype(1.)),
    kernel_divergence_fn=lambda q, p, ignore: kl_lib.kl_divergence(q, p),
    bias_posterior_fn=default_mean_field_normal_fn(is_singular=True),
    bias_posterior_tensor_fn=lambda d: d.sample(),
    bias_prior_fn=None,
    bias_divergence_fn=lambda q, p, ignore: kl_lib.kl_divergence(q, p),
    name=None,
    **kwargs):
  super(DenseVariational, self).__init__(
      trainable=trainable,
      name=name,
      activity_regularizer=activity_regularizer,
      **kwargs)
  self._units = units
  self._activation = activation
  self._input_spec = layers_lib.InputSpec(min_ndim=2)
  self._kernel_use_local_reparameterization = (
      kernel_use_local_reparameterization)
  self._kernel = VariationalKernelParameter(
      kernel_posterior_fn,
      kernel_posterior_tensor_fn,
      kernel_prior_fn,
      kernel_divergence_fn)
  self._bias = VariationalParameter(
      bias_posterior_fn,
      bias_posterior_tensor_fn,
      bias_prior_fn,
      bias_divergence_fn)
Contributor: Kongsea, Project: tensorflow, Lines: 38, Source: layers_dense_variational_impl.py
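The kernel_divergence_fn and bias_divergence_fn defaults in Example 19 simply delegate to kl_lib.kl_divergence(q, p) between a posterior q and a prior p. As a rough, hypothetical sketch of how such a divergence callback typically contributes a KL penalty to a variational loss (the posterior, prior, and nll names below are illustrative placeholders, not part of the layer's API):

import tensorflow as tf
from tensorflow.python.ops.distributions import kullback_leibler as kl_lib
from tensorflow.python.ops.distributions import normal as normal_lib

# Hypothetical mean-field posterior and standard-normal prior over one weight.
posterior = normal_lib.Normal(loc=0.1, scale=0.9)
prior = normal_lib.Normal(loc=0., scale=1.)

# Same callback shape as the layer's default divergence functions.
divergence_fn = lambda q, p, ignore: kl_lib.kl_divergence(q, p)

# The KL term is added to the data term (negative log-likelihood) as a
# regularizer; here the data term is just a placeholder constant.
nll = tf.constant(1.25)
loss = nll + divergence_fn(posterior, prior, None)

with tf.Session() as sess:
  print(sess.run(loss))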
Example 20: testBernoulliBernoulliKL
def testBernoulliBernoulliKL(self):
  batch_size = 6
  a_p = np.array([0.5] * batch_size, dtype=np.float32)
  b_p = np.array([0.4] * batch_size, dtype=np.float32)

  a = bernoulli.Bernoulli(probs=a_p)
  b = bernoulli.Bernoulli(probs=b_p)

  kl = kullback_leibler.kl_divergence(a, b)
  kl_val = self.evaluate(kl)

  kl_expected = (a_p * np.log(a_p / b_p) + (1. - a_p) * np.log(
      (1. - a_p) / (1. - b_p)))

  self.assertEqual(kl.get_shape(), (batch_size,))
  self.assertAllClose(kl_val, kl_expected)
Contributor: Huoxubeiyin, Project: tensorflow, Lines: 16, Source: bernoulli_test.py
Note: The tensorflow.python.ops.distributions.kullback_leibler.kl_divergence examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub/MSDocs. The snippets were selected from open-source projects contributed by their respective authors, and copyright remains with the original authors. Before redistributing or reusing the code, please consult the license of the corresponding project; do not republish without permission.