This article collects typical usage examples of the lbeta function from the Python module tensorflow.python.ops.special_math_ops. If you are wondering what lbeta does, how to call it, or want to see it used in context, the curated code examples below may help.
Twenty code examples of lbeta are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
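Before the examples, here is a minimal sketch of what lbeta computes. This snippet is my own illustration rather than one of the collected examples, and it assumes a recent TensorFlow with eager execution enabled (under graph mode, evaluate the tensors in a session, as the tests below do):

import tensorflow as tf
from tensorflow.python.ops import special_math_ops

# lbeta reduces over the last axis and returns the natural log of the
# multivariate Beta function: lbeta(x) = sum_i lgamma(x_i) - lgamma(sum_i x_i).
x = tf.constant([[2., 1.], [1., 1.]])
log_beta = special_math_ops.lbeta(x)  # shape (2,)
beta = tf.math.exp(log_beta)          # ~[0.5, 1.0]: Beta(2, 1) = 1/2, Beta(1, 1) = 1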
Example 1: test_empty_rank2_or_greater_input_gives_empty_output_dynamic_alloc
def test_empty_rank2_or_greater_input_gives_empty_output_dynamic_alloc(self):
  with self.test_session(use_gpu=self._use_gpu):
    ph = array_ops.placeholder(dtypes.float32)
    self.assertAllEqual(
        [], special_math_ops.lbeta(ph).eval(feed_dict={ph: [[]]}))
    self.assertAllEqual(
        [[]], special_math_ops.lbeta(ph).eval(feed_dict={ph: [[[]]]}))
Developer ID: Immexxx, Project: tensorflow, Lines: 7, Source file: special_math_ops_test.py
Example 2: test_two_dimensional_arg
def test_two_dimensional_arg(self):
  # Should evaluate to 1/2.
  x_one_half = [[2, 1.], [2, 1.]]
  with self.test_session(use_gpu=self._use_gpu):
    self.assertAllClose(
        [0.5, 0.5], math_ops.exp(special_math_ops.lbeta(x_one_half)).eval())
    self.assertEqual((2,), special_math_ops.lbeta(x_one_half).get_shape())
Developer ID: Immexxx, Project: tensorflow, Lines: 7, Source file: special_math_ops_test.py
Example 3: _log_prob
def _log_prob(self, counts):
  counts = self._assert_valid_counts(counts)
  ordered_prob = (special_math_ops.lbeta(self.alpha + counts) -
                  special_math_ops.lbeta(self.alpha))
  log_prob = ordered_prob + distribution_util.log_combinations(
      self.n, counts)
  return log_prob
Developer ID: ivankreso, Project: tensorflow, Lines: 7, Source file: dirichlet_multinomial.py
Example 4: _log_prob
def _log_prob(self, counts):
  counts = self._maybe_assert_valid_sample(counts)
  ordered_prob = (
      special_math_ops.lbeta(self.concentration + counts)
      - special_math_ops.lbeta(self.concentration))
  return ordered_prob + distribution_util.log_combinations(
      self.total_count, counts)
Developer ID: AbhinavJain13, Project: tensorflow, Lines: 7, Source file: dirichlet_multinomial.py
Example 5: log_pmf
def log_pmf(self, counts, name=None):
  """`Log(P[counts])`, computed for every batch member.

  For each batch of counts `[c_1,...,c_k]`, `P[counts]` is the probability
  that after sampling `sum_j c_j` draws from this Dirichlet Multinomial
  distribution, the number of draws falling in class `j` is `c_j`. Note that
  different sequences of draws can result in the same counts, thus the
  probability includes a combinatorial coefficient.

  Args:
    counts: Non-negative `float`, `double`, or `int` tensor whose shape can
      be broadcast with `self.alpha`. For fixed leading dimensions, the last
      dimension represents counts for the corresponding Dirichlet Multinomial
      distribution in `self.alpha`.
    name: Name to give this Op, defaults to "log_pmf".

  Returns:
    Log probabilities for each record, shape `[N1,...,Nn]`.
  """
  alpha = self._alpha
  with ops.op_scope([alpha, counts], name, 'log_pmf'):
    counts = self._check_counts(counts)
    ordered_pmf = (special_math_ops.lbeta(alpha + counts) -
                   special_math_ops.lbeta(alpha))
    log_pmf = ordered_pmf + _log_combinations(counts)
    # If alpha = counts = [[]], ordered_pmf carries the right shape, which is
    # []. However, since reduce_sum([[]]) = [0], log_combinations = [0],
    # which is not correct. Luckily, [] + [0] = [], so the sum is fine, but
    # shape must be inferred from ordered_pmf.
    # Note also that tf.constant([]).get_shape() = TensorShape([Dimension(0)])
    log_pmf.set_shape(ordered_pmf.get_shape())
    return log_pmf
Developer ID: 01bui, Project: tensorflow, Lines: 32, Source file: dirichlet_multinomial.py
Example 6: log_prob
def log_prob(self, counts, name="log_prob"):
  """`Log(P[counts])`, computed for every batch member.

  For each batch of counts `[n_1,...,n_k]`, `P[counts]` is the probability
  that after sampling `n` draws from this Dirichlet Multinomial
  distribution, the number of draws falling in class `j` is `n_j`. Note that
  different sequences of draws can result in the same counts, thus the
  probability includes a combinatorial coefficient.

  Args:
    counts: Non-negative tensor with dtype `dtype` and whose shape can be
      broadcast with `self.alpha`. For fixed leading dimensions, the last
      dimension represents counts for the corresponding Dirichlet Multinomial
      distribution in `self.alpha`. `counts` is only legal if it sums up to
      `n` and its components are equal to integer values.
    name: Name to give this Op, defaults to "log_prob".

  Returns:
    Log probabilities for each record, shape `[N1,...,Nn]`.
  """
  n = self._n
  alpha = self._alpha
  with ops.name_scope(self.name):
    with ops.name_scope(name, values=[n, alpha, counts]):
      counts = self._check_counts(counts)
      ordered_prob = (special_math_ops.lbeta(alpha + counts) -
                      special_math_ops.lbeta(alpha))
      log_prob = ordered_prob + distribution_util.log_combinations(
          n, counts)
      return log_prob
Developer ID: AriaAsuka, Project: tensorflow, Lines: 31, Source file: dirichlet_multinomial.py
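For reference, Examples 3 through 6 (and Example 17 below) all implement the same standard Dirichlet-Multinomial log-probability identity; the formula below is the textbook statement rather than something quoted from these source files:

$$\log P(c_1,\dots,c_k) = \log\binom{n}{c_1,\dots,c_k} + \ln B(\alpha + c) - \ln B(\alpha), \qquad n = \sum_j c_j,$$

where $B(\cdot)$ is the multivariate Beta function, so the two lbeta calls supply $\ln B(\alpha + c) - \ln B(\alpha)$ and log_combinations supplies the multinomial coefficient.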
Example 7: test_complicated_shape
def test_complicated_shape(self):
  with self.session(use_gpu=True):
    x = ops.convert_to_tensor(np.random.rand(3, 2, 2))
    self.assertAllEqual(
        (3, 2), self.evaluate(array_ops.shape(special_math_ops.lbeta(x))))
    self.assertEqual(
        tensor_shape.TensorShape([3, 2]),
        special_math_ops.lbeta(x).get_shape())
Developer ID: Wajih-O, Project: tensorflow, Lines: 8, Source file: special_math_ops_test.py
Example 8: test_one_dimensional_arg
def test_one_dimensional_arg(self):
  # Should evaluate to 1 and 1/2.
  x_one = [1, 1.]
  x_one_half = [2, 1.]
  with self.test_session(use_gpu=self._use_gpu):
    self.assertAllClose(1, math_ops.exp(special_math_ops.lbeta(x_one)).eval())
    self.assertAllClose(
        0.5, math_ops.exp(special_math_ops.lbeta(x_one_half)).eval())
    self.assertEqual([], special_math_ops.lbeta(x_one).get_shape())
Developer ID: Immexxx, Project: tensorflow, Lines: 9, Source file: special_math_ops_test.py
Example 9: test_length_1_last_dimension_results_in_one
def test_length_1_last_dimension_results_in_one(self):
  # If there is only one coefficient, the formula still works, and we get one
  # as the answer, always.
  x_a = [5.5]
  x_b = [0.1]
  with self.test_session(use_gpu=True):
    self.assertAllClose(1, math_ops.exp(special_math_ops.lbeta(x_a)).eval())
    self.assertAllClose(1, math_ops.exp(special_math_ops.lbeta(x_b)).eval())
    self.assertEqual((), special_math_ops.lbeta(x_a).get_shape())
Developer ID: ChengYuXiang, Project: tensorflow, Lines: 9, Source file: special_math_ops_test.py
Example 10: test_two_dimensional_proper_shape
def test_two_dimensional_proper_shape(self):
  # Should evaluate to 1/2.
  x_one_half = [[2, 1.], [2, 1.]]
  with self.test_session(use_gpu=True):
    self.assertAllClose(
        [0.5, 0.5], math_ops.exp(special_math_ops.lbeta(x_one_half)).eval())
    self.assertEqual(
        (2,), array_ops.shape(special_math_ops.lbeta(x_one_half)).eval())
    self.assertEqual(
        tensor_shape.TensorShape([2]),
        special_math_ops.lbeta(x_one_half).get_shape())
Developer ID: Lin-jipeng, Project: tensorflow, Lines: 11, Source file: special_math_ops_test.py
Example 11: test_two_dimensional_arg_dynamic
def test_two_dimensional_arg_dynamic(self):
  # Should evaluate to 1/2.
  x_one_half = [[2, 1.], [2, 1.]]
  with self.test_session(use_gpu=True):
    ph = array_ops.placeholder(dtypes.float32)
    beta_ph = math_ops.exp(special_math_ops.lbeta(ph))
    self.assertAllClose([0.5, 0.5], beta_ph.eval(feed_dict={ph: x_one_half}))
Developer ID: ChengYuXiang, Project: tensorflow, Lines: 7, Source file: special_math_ops_test.py
Example 12: test_empty_rank1_returns_negative_infinity
def test_empty_rank1_returns_negative_infinity(self):
  with self.test_session(use_gpu=True):
    x = constant_op.constant([], shape=[0])
    lbeta_x = special_math_ops.lbeta(x)
    expected_result = constant_op.constant(-np.inf, shape=())
    self.assertAllEqual(expected_result.eval(), lbeta_x.eval())
    self.assertEqual(expected_result.get_shape(), lbeta_x.get_shape())
Developer ID: ChengYuXiang, Project: tensorflow, Lines: 8, Source file: special_math_ops_test.py
Example 13: _log_prob
def _log_prob(self, x):
  x = ops.convert_to_tensor(x, name="x")
  x = self._assert_valid_sample(x)
  unnorm_prob = (self.alpha - 1.) * math_ops.log(x)
  log_prob = math_ops.reduce_sum(
      unnorm_prob, reduction_indices=[-1],
      keep_dims=False) - special_math_ops.lbeta(self.alpha)
  return log_prob
Developer ID: curtiszimmerman, Project: tensorflow, Lines: 8, Source file: dirichlet.py
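Example 13 evaluates the Dirichlet log-density; the closed form it corresponds to (a standard identity, restated here for clarity rather than copied from the source) is

$$\log p(x \mid \alpha) = \sum_j (\alpha_j - 1)\ln x_j - \ln B(\alpha),$$

with lbeta(self.alpha) providing the $\ln B(\alpha)$ normalizer.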
Example 14: test_one_dimensional_arg_dynamic_alloc
def test_one_dimensional_arg_dynamic_alloc(self):
  # Should evaluate to 1 and 1/2.
  x_one = [1, 1.]
  x_one_half = [2, 1.]
  with self.test_session(use_gpu=self._use_gpu):
    ph = array_ops.placeholder(dtypes.float32)
    beta_ph = math_ops.exp(special_math_ops.lbeta(ph))
    self.assertAllClose(1, beta_ph.eval(feed_dict={ph: x_one}))
    self.assertAllClose(0.5, beta_ph.eval(feed_dict={ph: x_one_half}))
Developer ID: Immexxx, Project: tensorflow, Lines: 9, Source file: special_math_ops_test.py
Example 15: _entropy
def _entropy(self):
  u = array_ops.expand_dims(self.df * self._ones(), -1)
  v = array_ops.expand_dims(self._ones(), -1)
  beta_arg = array_ops.concat_v2([u, v], len(u.get_shape()) - 1) / 2
  half_df = 0.5 * self.df
  return ((0.5 + half_df) *
          (math_ops.digamma(0.5 + half_df) - math_ops.digamma(half_df)) + 0.5
          * math_ops.log(self.df) + special_math_ops.lbeta(beta_arg) +
          math_ops.log(self.sigma))
Developer ID: kadeng, Project: tensorflow, Lines: 9, Source file: student_t.py
Example 16: _entropy
def _entropy(self):
  entropy = special_math_ops.lbeta(self.alpha)
  entropy += math_ops.digamma(self.alpha_sum) * (
      self.alpha_sum - math_ops.cast(self.event_shape()[0], self.dtype))
  entropy += -math_ops.reduce_sum(
      (self.alpha - 1.) * math_ops.digamma(self.alpha),
      reduction_indices=[-1],
      keep_dims=False)
  return entropy
Developer ID: curtiszimmerman, Project: tensorflow, Lines: 9, Source file: dirichlet.py
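Example 16 matches the closed-form Dirichlet entropy, stated here as a reference (a textbook result, not taken from the file itself), where $\alpha_0 = \sum_j \alpha_j$ and $K$ is the event size:

$$H = \ln B(\alpha) + (\alpha_0 - K)\,\psi(\alpha_0) - \sum_j (\alpha_j - 1)\,\psi(\alpha_j).$$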
Example 17: log_pmf
def log_pmf(self, counts, name='log_pmf'):
  """`Log(P[counts])`, computed for every batch member.

  For each batch of counts `[n_1,...,n_k]`, `P[counts]` is the probability
  that after sampling `n` draws from this Dirichlet Multinomial
  distribution, the number of draws falling in class `j` is `n_j`. Note that
  different sequences of draws can result in the same counts, thus the
  probability includes a combinatorial coefficient.

  Args:
    counts: Non-negative `float` or `double` tensor whose shape can
      be broadcast with `self.alpha`. For fixed leading dimensions, the last
      dimension represents counts for the corresponding Dirichlet Multinomial
      distribution in `self.alpha`. `counts` is only legal if it sums up to
      `n` and its components are equal to integral values. The second
      condition is relaxed if `allow_arbitrary_counts` is set.
    name: Name to give this Op, defaults to "log_pmf".

  Returns:
    Log probabilities for each record, shape `[N1,...,Nn]`.
  """
  n = self._n
  alpha = self._alpha
  with ops.name_scope(self.name):
    with ops.op_scope([n, alpha, counts], name):
      counts = self._check_counts(counts)
      # Use the same dtype as alpha for computations.
      counts = math_ops.cast(counts, self.dtype)
      ordered_pmf = (special_math_ops.lbeta(alpha + counts) -
                     special_math_ops.lbeta(alpha))
      log_pmf = ordered_pmf + _log_combinations(n, counts)
      # If alpha = counts = [[]], ordered_pmf carries the right shape, which
      # is []. However, since reduce_sum([[]]) = [0], log_combinations = [0],
      # which is not correct. Luckily, [] + [0] = [], so the sum is fine, but
      # shape must be inferred from ordered_pmf. We must also make this
      # broadcastable with n, so this is multiplied by n to ensure the shape
      # is correctly inferred.
      # Note also that tf.constant([]).get_shape() =
      # TensorShape([Dimension(0)])
      broadcasted_tensor = ordered_pmf * n
      log_pmf.set_shape(broadcasted_tensor.get_shape())
      return log_pmf
Developer ID: Brandon-Tai, Project: tensorflow, Lines: 43, Source file: dirichlet_multinomial.py
Example 18: _entropy
def _entropy(self):
  v = array_ops.ones(self.batch_shape_tensor(), dtype=self.dtype)[..., None]
  u = v * self.df[..., None]
  beta_arg = array_ops.concat([u, v], -1) / 2.
  return (math_ops.log(math_ops.abs(self.scale)) +
          0.5 * math_ops.log(self.df) +
          special_math_ops.lbeta(beta_arg) +
          0.5 * (self.df + 1.) *
          (math_ops.digamma(0.5 * (self.df + 1.)) -
           math_ops.digamma(0.5 * self.df)))
Developer ID: Jackhuang945, Project: tensorflow, Lines: 10, Source file: student_t.py
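Examples 15 and 18 are two versions of the same Student's t entropy; the closed form they implement (a standard result, with $\nu$ the degrees of freedom and $\sigma$ the scale) is

$$H = \frac{\nu + 1}{2}\left[\psi\!\left(\frac{\nu + 1}{2}\right) - \psi\!\left(\frac{\nu}{2}\right)\right] + \frac{1}{2}\ln\nu + \ln B\!\left(\frac{\nu}{2}, \frac{1}{2}\right) + \ln\sigma,$$

which is why lbeta is applied to [df/2, 1/2] along the last axis.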
Example 19: test_empty_rank2_with_zero_last_dim_returns_negative_infinity
def test_empty_rank2_with_zero_last_dim_returns_negative_infinity(self):
  with self.test_session(use_gpu=True):
    event_size = 0
    for batch_size in [0, 1, 2]:
      x = constant_op.constant([], shape=[batch_size, event_size])
      lbeta_x = special_math_ops.lbeta(x)
      expected_result = constant_op.constant(-np.inf, shape=[batch_size])
      self.assertAllEqual(expected_result.eval(), lbeta_x.eval())
      self.assertEqual(expected_result.get_shape(), lbeta_x.get_shape())
Developer ID: ChengYuXiang, Project: tensorflow, Lines: 10, Source file: special_math_ops_test.py
Example 20: test_empty_rank2_with_zero_batch_dim_returns_empty
def test_empty_rank2_with_zero_batch_dim_returns_empty(self):
  with self.test_session(use_gpu=self._use_gpu):
    batch_size = 0
    for event_size in [0, 1, 2]:
      x = constant_op.constant([], shape=[batch_size, event_size])
      lbeta_x = special_math_ops.lbeta(x)
      expected_result = constant_op.constant([], shape=[batch_size])
      self.assertAllEqual(expected_result.eval(), lbeta_x.eval())
      self.assertEqual(expected_result.get_shape(), lbeta_x.get_shape())
Developer ID: AlbertXiebnu, Project: tensorflow, Lines: 11, Source file: special_math_ops_test.py
Note: The tensorflow.python.ops.special_math_ops.lbeta examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by many developers, and copyright of the source code remains with the original authors; consult each project's license before redistributing or reusing the code. Do not reproduce this article without permission.