This article collects typical usage examples of the Python function tensorflow.python.ops.linalg_ops.matrix_determinant. If you are unsure what matrix_determinant does or how to call it, the curated code examples below should help.
The following shows 20 code examples of the matrix_determinant function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
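Before diving into the examples, here is a minimal usage sketch for orientation. It is not taken from any of the projects below: it assumes TensorFlow 2.x with eager execution and uses the public alias tf.linalg.det, which wraps the same underlying MatrixDeterminant kernel as linalg_ops.matrix_determinant.

import tensorflow as tf

# Determinant of a single square matrix: 1*4 - 2*3 = -2.
matrix = tf.constant([[1., 2.],
                      [3., 4.]])
print(tf.linalg.det(matrix).numpy())  # -2.0

# The op batches over leading dimensions: shape [4, 3, 3] -> shape [4].
batch = tf.eye(3, batch_shape=[4])
print(tf.linalg.det(batch).numpy())   # [1. 1. 1. 1.]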
Example 1: benchmarkMatrixDeterminantOp

def benchmarkMatrixDeterminantOp(self):
  for shape in self.shapes:
    with ops.Graph().as_default(), session.Session(
        config=benchmark.benchmark_config()) as sess, ops.device("/cpu:0"):
      matrix = self._GenerateMatrix(shape)
      d = linalg_ops.matrix_determinant(matrix)
      variables.global_variables_initializer().run()
      self.run_op_benchmark(
          sess,
          control_flow_ops.group(d),
          min_iters=25,
          name="matrix_determinant_cpu_{shape}".format(shape=shape))

    if test.is_gpu_available(True):
      with ops.Graph().as_default(), session.Session(
          config=benchmark.benchmark_config()) as sess, ops.device("/gpu:0"):
        matrix = self._GenerateMatrix(shape)
        d = linalg_ops.matrix_determinant(matrix)
        variables.global_variables_initializer().run()
        self.run_op_benchmark(
            sess,
            control_flow_ops.group(d),
            min_iters=25,
            name="matrix_determinant_gpu_{shape}".format(shape=shape))

Author: abhinav-upadhyay, Project: tensorflow, Lines: 26, Source: determinant_op_test.py
Example 2: testConcurrentExecutesWithoutError

def testConcurrentExecutesWithoutError(self):
  with self.session(use_gpu=True) as sess:
    matrix1 = random_ops.random_normal([5, 5], seed=42)
    matrix2 = random_ops.random_normal([5, 5], seed=42)
    det1 = linalg_ops.matrix_determinant(matrix1)
    det2 = linalg_ops.matrix_determinant(matrix2)
    det1_val, det2_val = sess.run([det1, det2])
    self.assertEqual(det1_val, det2_val)

Author: abhinav-upadhyay, Project: tensorflow, Lines: 8, Source: determinant_op_test.py
Example 3: _determinant

def _determinant(self):
  logging.warn(
      "Using (possibly slow) default implementation of determinant."
      " Requires conversion to a dense matrix and O(N^3) operations.")
  if self._can_use_cholesky():
    return math_ops.exp(self.log_abs_determinant())
  return linalg_ops.matrix_determinant(self._matrix)

Author: 1000sprites, Project: tensorflow, Lines: 7, Source: linear_operator.py
Example 4: test_det

def test_det(self):
  with self.session(graph=ops.Graph()) as sess:
    sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
    operator, mat = self.operator_and_matrix(
        shapes_info, dtype, use_placeholder=use_placeholder)
    op_det = operator.determinant()
    if not use_placeholder:
      self.assertAllEqual(shapes_info.shape[:-2], op_det.get_shape())
    op_det_v, mat_det_v = sess.run(
        [op_det, linalg_ops.matrix_determinant(mat)])
    self.assertAC(op_det_v, mat_det_v)

Author: aritratony, Project: tensorflow, Lines: 11, Source: linear_operator_test_util.py
Example 5: testDeterminants

def testDeterminants(self):
  with self.test_session():
    for batch_shape in [(), (2, 3)]:
      for k in [1, 4]:
        operator, mat = self._build_operator_and_mat(batch_shape, k)
        expected_det = linalg_ops.matrix_determinant(mat).eval()
        self._compare_results(expected_det, operator.det())
        self._compare_results(np.log(expected_det), operator.log_det())

Author: AlbertXiebnu, Project: tensorflow, Lines: 11, Source: operator_test_util.py
Example 6: testBatchGradientUnknownSize

def testBatchGradientUnknownSize(self):
  with self.test_session():
    batch_size = constant_op.constant(3)
    matrix_size = constant_op.constant(4)
    batch_identity = array_ops.tile(
        array_ops.expand_dims(
            array_ops.diag(array_ops.ones([matrix_size])), 0),
        [batch_size, 1, 1])
    determinants = linalg_ops.matrix_determinant(batch_identity)
    reduced = math_ops.reduce_sum(determinants)
    sum_grad = gradients_impl.gradients(reduced, batch_identity)[0]
    self.assertAllClose(batch_identity.eval(), sum_grad.eval())

Author: AnishShah, Project: tensorflow, Lines: 12, Source: linalg_grad_test.py
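The expected gradient in Example 6 is the identity because d(det A)/dA = det(A) * inv(A)^T, which equals the identity matrix when A itself is the identity. The following small check is not part of the test above; it is a sketch assuming TensorFlow 2.x eager mode and the public tf.linalg.det / tf.GradientTape API.

import numpy as np
import tensorflow as tf

a = tf.eye(4)
with tf.GradientTape() as tape:
  tape.watch(a)              # a is a plain tensor, so watch it explicitly
  det = tf.linalg.det(a)
grad = tape.gradient(det, a)
# d(det A)/dA = det(A) * inv(A)^T, i.e. the 4x4 identity here.
print(np.allclose(grad.numpy(), np.eye(4)))  # True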
Example 7: _determinant

def _determinant(self):
  if self.is_positive_definite:
    return math_ops.exp(self.log_abs_determinant())
  # The matrix determinant lemma gives
  # https://en.wikipedia.org/wiki/Matrix_determinant_lemma
  #   det(L + UDV^H) = det(D^{-1} + V^H L^{-1} U) det(D) det(L)
  #                  = det(C) det(D) det(L)
  # where C is sometimes known as the capacitance matrix,
  #   C := D^{-1} + V^H L^{-1} U
  det_c = linalg_ops.matrix_determinant(self._capacitance)
  det_d = self.diag_operator.determinant()
  det_l = self.base_operator.determinant()
  return det_c * det_d * det_l

Author: LugarkPirog, Project: tensorflow, Lines: 13, Source: linear_operator_udvh_update.py
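The matrix determinant lemma quoted in the comments of Example 7 can be verified numerically. The sketch below is illustrative only (the names l, u, d, v and the random sizes are assumptions, not part of the TensorFlow code) and uses NumPy for clarity; it works with real matrices, so V^H reduces to V^T.

import numpy as np

rng = np.random.default_rng(0)
n, k = 5, 2
l = np.eye(n) + 0.1 * rng.standard_normal((n, n))   # base matrix L
u = rng.standard_normal((n, k))
v = rng.standard_normal((n, k))
d = np.diag(rng.uniform(0.5, 1.5, size=k))          # diagonal D

# Capacitance matrix C = D^{-1} + V^T L^{-1} U.
c = np.linalg.inv(d) + v.T @ np.linalg.inv(l) @ u

lhs = np.linalg.det(l + u @ d @ v.T)
rhs = np.linalg.det(c) * np.linalg.det(d) * np.linalg.det(l)
print(np.allclose(lhs, rhs))  # True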
Example 8: test_det

def test_det(self):
  self._skip_if_tests_to_skip_contains("det")
  for use_placeholder in self._use_placeholder_options:
    for build_info in self._operator_build_infos:
      for dtype in self._dtypes_to_test:
        with self.session(graph=ops.Graph()) as sess:
          sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
          operator, mat = self._operator_and_matrix(
              build_info, dtype, use_placeholder=use_placeholder)
          op_det = operator.determinant()
          if not use_placeholder:
            self.assertAllEqual(build_info.shape[:-2], op_det.get_shape())
          op_det_v, mat_det_v = sess.run(
              [op_det, linalg_ops.matrix_determinant(mat)])
          self.assertAC(op_det_v, mat_det_v)

Author: adit-chandra, Project: tensorflow, Lines: 15, Source: linear_operator_test_util.py
Example 9: test_det

def test_det(self):
  self._skip_if_tests_to_skip_contains("det")
  for use_placeholder in False, True:
    for shape in self._shapes_to_test:
      for dtype in self._dtypes_to_test:
        with self.test_session(graph=ops.Graph()) as sess:
          sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
          operator, mat, feed_dict = self._operator_and_mat_and_feed_dict(
              shape, dtype, use_placeholder=use_placeholder)
          op_det = operator.determinant()
          if not use_placeholder:
            self.assertAllEqual(shape[:-2], op_det.get_shape())
          op_det_v, mat_det_v = sess.run(
              [op_det, linalg_ops.matrix_determinant(mat)],
              feed_dict=feed_dict)
          self.assertAC(op_det_v, mat_det_v)

Author: 1000sprites, Project: tensorflow, Lines: 16, Source: linear_operator_test_util.py
Example 10: _log_abs_determinant

def _log_abs_determinant(self):
  # Recall
  #   det(L + UDV^H) = det(D^{-1} + V^H L^{-1} U) det(D) det(L)
  #                  = det(C) det(D) det(L)
  log_abs_det_d = self.diag_operator.log_abs_determinant()
  log_abs_det_l = self.base_operator.log_abs_determinant()
  if self._use_cholesky:
    chol_cap_diag = array_ops.matrix_diag_part(self._chol_capacitance)
    log_abs_det_c = 2 * math_ops.reduce_sum(
        math_ops.log(chol_cap_diag), reduction_indices=[-1])
  else:
    det_c = linalg_ops.matrix_determinant(self._capacitance)
    log_abs_det_c = math_ops.log(math_ops.abs(det_c))
  return log_abs_det_c + log_abs_det_d + log_abs_det_l

Author: LugarkPirog, Project: tensorflow, Lines: 16, Source: linear_operator_udvh_update.py
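The Cholesky branch in Example 10 relies on the identity log|det(C)| = 2 * sum(log(diag(chol(C)))) for a positive definite C, which avoids forming the determinant (and its possible overflow) directly. A small NumPy sketch of that identity, with an illustrative matrix c:

import numpy as np

c = np.array([[4., 1.],
              [1., 3.]])       # symmetric positive definite, det = 11
chol = np.linalg.cholesky(c)
log_abs_det = 2.0 * np.sum(np.log(np.diag(chol)))
print(np.isclose(log_abs_det, np.log(np.linalg.det(c))))  # True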
Example 11: test_det

def test_det(self):
  self._maybe_skip("det")
  with self.test_session() as sess:
    for use_placeholder in False, True:
      for shape in self._shapes_to_test:
        for dtype in self._dtypes_to_test:
          if dtype.is_complex:
            self.skipTest(
                "tf.matrix_determinant does not work with complex, so this "
                "test is being skipped.")
          operator, mat, feed_dict = self._operator_and_mat_and_feed_dict(
              shape, dtype, use_placeholder=use_placeholder)
          op_det = operator.determinant()
          if not use_placeholder:
            self.assertAllEqual(shape[:-2], op_det.get_shape())
          op_det_v, mat_det_v = sess.run(
              [op_det, linalg_ops.matrix_determinant(mat)],
              feed_dict=feed_dict)
          self.assertAC(op_det_v, mat_det_v)

Author: kadeng, Project: tensorflow, Lines: 19, Source: linear_operator_test_util.py
Example 12: sqrt_log_abs_det

def sqrt_log_abs_det(self):
  """Computes (log o abs o det)(X) for matrix X.

  Doesn't actually do the sqrt! Named as such to agree with the API.

  To compute det(M + V D V.T), we use the matrix determinant lemma:
    det(Tril + V D V.T) = det(C) det(D) det(M)
  where C is defined as in `_inverse`, i.e.,
    C = inv(D) + V.T inv(M) V.

  See: https://en.wikipedia.org/wiki/Matrix_determinant_lemma

  Returns:
    log_abs_det: `Tensor`.
  """
  log_det_c = math_ops.log(math_ops.abs(
      linalg_ops.matrix_determinant(self._woodbury_sandwiched_term())))
  # Reduction is ok because we always prepad inputs to this class.
  log_det_m = math_ops.reduce_sum(math_ops.log(math_ops.abs(
      array_ops.matrix_diag_part(self._m))), axis=[-1])
  return log_det_c + 2. * self._d.sqrt_log_abs_det() + log_det_m

Author: AlbertXiebnu, Project: tensorflow, Lines: 21, Source: affine_impl.py
Example 13: test_log_abs_det

def test_log_abs_det(self):
  self._skip_if_tests_to_skip_contains("log_abs_det")
  for use_placeholder in False, True:
    for shape in self._shapes_to_test:
      for dtype in self._dtypes_to_test:
        if dtype.is_complex:
          self.skipTest(
              "tf.matrix_determinant does not work with complex, so this "
              "test is being skipped.")
        with self.test_session(graph=ops.Graph()) as sess:
          sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
          operator, mat, feed_dict = self._operator_and_mat_and_feed_dict(
              shape, dtype, use_placeholder=use_placeholder)
          op_log_abs_det = operator.log_abs_determinant()
          mat_log_abs_det = math_ops.log(
              math_ops.abs(linalg_ops.matrix_determinant(mat)))
          if not use_placeholder:
            self.assertAllEqual(shape[:-2], op_log_abs_det.get_shape())
          op_log_abs_det_v, mat_log_abs_det_v = sess.run(
              [op_log_abs_det, mat_log_abs_det],
              feed_dict=feed_dict)
          self.assertAC(op_log_abs_det_v, mat_log_abs_det_v)

Author: Joetz, Project: tensorflow, Lines: 22, Source: linear_operator_test_util.py
Example 14: log_noninformative_covariance_prior

def log_noninformative_covariance_prior(covariance):
  """Compute a relatively uninformative prior for noise parameters.

  Helpful for avoiding noise over-estimation, where noise otherwise decreases
  very slowly during optimization.

  See:
    Villegas, C. On the A Priori Distribution of the Covariance Matrix.
    Ann. Math. Statist. 40 (1969), no. 3, 1098--1099.

  Args:
    covariance: A covariance matrix.

  Returns:
    For a [p x p] matrix: log(det(covariance)^(-(p + 1) / 2))
  """
  # Avoid zero/negative determinants due to numerical errors.
  covariance += array_ops.diag(1e-8 * array_ops.ones(
      shape=[array_ops.shape(covariance)[0]], dtype=covariance.dtype))
  power = -(math_ops.cast(array_ops.shape(covariance)[0] + 1,
                          covariance.dtype) / 2.)
  return power * math_ops.log(linalg_ops.matrix_determinant(covariance))

Author: AutumnQYN, Project: tensorflow, Lines: 22, Source: math_utils.py
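To make the prior in Example 14 concrete, here is a hypothetical NumPy re-implementation of the same formula, log(det(covariance)^(-(p + 1) / 2)), including the small diagonal jitter; the function name and sample matrix are illustrative, not from the TensorFlow code.

import numpy as np

def log_noninformative_covariance_prior_np(covariance):
  # Mirror of the TF function above: power * log(det(covariance)),
  # with power = -(p + 1) / 2 for a [p x p] covariance.
  p = covariance.shape[0]
  covariance = covariance + 1e-8 * np.eye(p, dtype=covariance.dtype)
  power = -(p + 1) / 2.0
  return power * np.log(np.linalg.det(covariance))

cov = np.array([[2.0, 0.3],
                [0.3, 1.0]])
print(log_noninformative_covariance_prior_np(cov))  # -1.5 * log(1.91), about -0.97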
Example 15: _compareDeterminant

def _compareDeterminant(self, matrix_x):
  with test_util.use_gpu():
    self._compareDeterminantBase(matrix_x,
                                 linalg_ops.matrix_determinant(matrix_x))
    self._compareLogDeterminantBase(
        matrix_x, gen_linalg_ops.log_matrix_determinant(matrix_x))

Author: Wajih-O, Project: tensorflow, Lines: 6, Source: determinant_op_test.py
Example 16: _compareDeterminant

def _compareDeterminant(self, matrix_x):
  with self.cached_session(use_gpu=True):
    self._compareDeterminantBase(matrix_x,
                                 linalg_ops.matrix_determinant(matrix_x))
    self._compareLogDeterminantBase(
        matrix_x, gen_linalg_ops.log_matrix_determinant(matrix_x))

Author: abhinav-upadhyay, Project: tensorflow, Lines: 6, Source: determinant_op_test.py
Example 17: testWrongDimensions
def testWrongDimensions(self):
  # The input to the determinant should be a 2-dimensional tensor.
  tensor1 = constant_op.constant([1., 2.])
  with self.assertRaises(ValueError):
    linalg_ops.matrix_determinant(tensor1)
Author: abhinav-upadhyay, Project: tensorflow, Lines: 5, Source: determinant_op_test.py
Example 18: _compareDeterminant

def _compareDeterminant(self, matrix_x):
  with self.test_session():
    self._compareDeterminantBase(matrix_x,
                                 linalg_ops.matrix_determinant(matrix_x))

Author: AlbertXiebnu, Project: tensorflow, Lines: 4, Source: determinant_op_test.py
Example 19: testNonSquareMatrix

def testNonSquareMatrix(self):
  # Attempting the determinant of a non-square matrix should raise an error.
  with self.assertRaises(ValueError):
    linalg_ops.matrix_determinant(
        np.array([[1., 2., 3.], [3., 5., 4.]]).astype(np.float32))

Author: abhinav-upadhyay, Project: tensorflow, Lines: 6, Source: determinant_op_test.py
Example 20: _determinant

def _determinant(self):
  if self._can_use_cholesky():
    return math_ops.exp(self.log_abs_determinant())
  return linalg_ops.matrix_determinant(self._matrix)

Author: maony, Project: tensorflow, Lines: 4, Source: linear_operator.py
Note: The tensorflow.python.ops.linalg_ops.matrix_determinant examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets come from open-source projects contributed by their respective developers, and copyright remains with the original authors. Please consult the license of the corresponding project before using or redistributing the code; do not repost without permission.