This article collects typical usage examples of the Python function tensorflow.python.ops.array_ops.matrix_diag_part. If you are wondering what exactly matrix_diag_part does, how to use it, or want to see it in real-world context, the curated code samples below should help.
The following shows 20 code examples of matrix_diag_part, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
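Before diving into the project examples, here is a minimal orientation sketch (ours, not taken from any of the projects below): matrix_diag_part takes a tensor of shape [..., M, N] and returns the main diagonal of each innermost matrix, of length min(M, N). The snippet assumes TF 2.x eager execution and uses the public alias tf.linalg.diag_part so it runs without TF-internal imports.

import numpy as np
import tensorflow as tf

# matrix_diag_part extracts the main diagonal of each innermost matrix;
# for a rectangular [M, N] input the result has length min(M, N).
mat = np.array([[1.0, 2.0, 3.0],
                [4.0, 5.0, 6.0]])
diag = tf.linalg.diag_part(mat)  # public alias of array_ops.matrix_diag_part
print(np.asarray(diag))   # [1. 5.]
print(np.diagonal(mat))   # NumPy equivalent, also [1. 5.]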
Example 1: testRectangular

def testRectangular(self):
  with self.session(use_gpu=True):
    mat = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
    mat_diag = array_ops.matrix_diag_part(mat)
    self.assertAllEqual(mat_diag.eval(), np.array([1.0, 5.0]))
    mat = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
    mat_diag = array_ops.matrix_diag_part(mat)
    self.assertAllEqual(mat_diag.eval(), np.array([1.0, 4.0]))

Author: JonathanRaiman | Project: tensorflow | Lines: 8 | Source: diag_op_test.py
Example 2: _variance

def _variance(self):
  if distribution_util.is_diagonal_scale(self.scale):
    return math_ops.square(self.scale.diag_part())
  elif (isinstance(self.scale, linalg.LinearOperatorLowRankUpdate) and
        self.scale.is_self_adjoint):
    return array_ops.matrix_diag_part(
        self.scale.matmul(self.scale.to_dense()))
  else:
    return array_ops.matrix_diag_part(
        self.scale.matmul(self.scale.to_dense(), adjoint_arg=True))

Author: didukhle | Project: tensorflow | Lines: 10 | Source: mvn_linear_operator.py
Example 3: _stddev

def _stddev(self):
  if distribution_util.is_diagonal_scale(self.scale):
    return np.sqrt(2) * math_ops.abs(self.scale.diag_part())
  elif (isinstance(self.scale, linalg.LinearOperatorUDVHUpdate)
        and self.scale.is_self_adjoint):
    return np.sqrt(2) * math_ops.sqrt(array_ops.matrix_diag_part(
        self.scale.matmul(self.scale.to_dense())))
  else:
    return np.sqrt(2) * math_ops.sqrt(array_ops.matrix_diag_part(
        self.scale.matmul(self.scale.to_dense(), adjoint_arg=True)))

Author: 1000sprites | Project: tensorflow | Lines: 10 | Source: vector_laplace_linear_operator.py
Example 4: _stddev

def _stddev(self):
  if (isinstance(self.scale, linalg.LinearOperatorIdentity) or
      isinstance(self.scale, linalg.LinearOperatorScaledIdentity) or
      isinstance(self.scale, linalg.LinearOperatorDiag)):
    return math_ops.abs(self.scale.diag_part())
  elif (isinstance(self.scale, linalg.LinearOperatorUDVHUpdate)
        and self.scale.is_self_adjoint):
    return math_ops.sqrt(array_ops.matrix_diag_part(
        self.scale.apply(self.scale.to_dense())))
  else:
    # TODO(b/35040238): Remove transpose once LinOp supports `transpose`.
    return math_ops.sqrt(array_ops.matrix_diag_part(
        self.scale.apply(array_ops.matrix_transpose(self.scale.to_dense()))))

Author: jzuern | Project: tensorflow | Lines: 13 | Source: mvn_linear_operator.py
Example 5: testSample

def testSample(self):
  with self.test_session():
    scale = make_pd(1., 2)
    df = 4

    chol_w = distributions.WishartCholesky(
        df, chol(scale), cholesky_input_output_matrices=False)

    x = chol_w.sample(1, seed=42).eval()
    chol_x = [chol(x[0])]

    full_w = distributions.WishartFull(
        df, scale, cholesky_input_output_matrices=False)
    self.assertAllClose(x, full_w.sample(1, seed=42).eval())

    chol_w_chol = distributions.WishartCholesky(
        df, chol(scale), cholesky_input_output_matrices=True)
    self.assertAllClose(chol_x, chol_w_chol.sample(1, seed=42).eval())
    eigen_values = array_ops.matrix_diag_part(
        chol_w_chol.sample(1000, seed=42))
    np.testing.assert_array_less(0., eigen_values.eval())

    full_w_chol = distributions.WishartFull(
        df, scale, cholesky_input_output_matrices=True)
    self.assertAllClose(chol_x, full_w_chol.sample(1, seed=42).eval())
    eigen_values = array_ops.matrix_diag_part(
        full_w_chol.sample(1000, seed=42))
    np.testing.assert_array_less(0., eigen_values.eval())

    # Check first and second moments.
    df = 4.
    chol_w = distributions.WishartCholesky(
        df=df,
        scale=chol(make_pd(1., 3)),
        cholesky_input_output_matrices=False)
    x = chol_w.sample(10000, seed=42)
    self.assertAllEqual((10000, 3, 3), x.get_shape())

    moment1_estimate = math_ops.reduce_mean(x, reduction_indices=[0]).eval()
    self.assertAllClose(chol_w.mean().eval(), moment1_estimate, rtol=0.05)

    # The variance estimate uses the squares rather than outer-products
    # because Wishart.Variance is the diagonal of the Wishart covariance
    # matrix.
    variance_estimate = (math_ops.reduce_mean(
        math_ops.square(x), reduction_indices=[0]) -
                         math_ops.square(moment1_estimate)).eval()
    self.assertAllClose(
        chol_w.variance().eval(), variance_estimate, rtol=0.05)

Author: BhaskarNallani | Project: tensorflow | Lines: 51 | Source: wishart_test.py
Example 6: _maybe_attach_assertion

def _maybe_attach_assertion(x):
  if not validate_args:
    return x
  if assert_positive:
    return control_flow_ops.with_dependencies([
        check_ops.assert_positive(
            array_ops.matrix_diag_part(x),
            message="diagonal part must be positive"),
    ], x)
  return control_flow_ops.with_dependencies([
      check_ops.assert_none_equal(
          array_ops.matrix_diag_part(x),
          array_ops.zeros([], x.dtype),
          message="diagonal part must be non-zero"),
  ], x)

Author: AndrewTwinz | Project: tensorflow | Lines: 15 | Source: distribution_util.py
Example 7: _variance

def _variance(self):
  x = math_ops.sqrt(self.df) * self.scale_operator_pd.to_dense()
  d = array_ops.expand_dims(array_ops.matrix_diag_part(x), -1)
  v = math_ops.square(x) + math_ops.matmul(d, d, adjoint_b=True)
  if self.cholesky_input_output_matrices:
    return linalg_ops.cholesky(v)
  return v

Author: ivankreso | Project: tensorflow | Lines: 7 | Source: wishart.py
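The closed form computed in _variance matches the textbook Wishart variance, Var(W[i, j]) = df * (S[i, j]**2 + S[i, i] * S[j, j]). A quick NumPy check (ours; the scale matrix below is an arbitrary positive definite example):

import numpy as np

df = 5.
s = np.array([[2.0, 0.5],
              [0.5, 1.0]])  # arbitrary positive definite scale matrix S

# Mirror the TF code: x = sqrt(df) * S, v = x**2 + d @ d.T with d = diag(x).
x = np.sqrt(df) * s
d = np.diag(x)[:, None]
v = x**2 + d @ d.T

# Elementwise this equals df * (S_ij^2 + S_ii * S_jj).
expected = df * (s**2 + np.outer(np.diag(s), np.diag(s)))
assert np.allclose(v, expected)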
Example 8: testSquare

def testSquare(self):
  with self.session(use_gpu=True):
    v = np.array([1.0, 2.0, 3.0])
    mat = np.diag(v)
    mat_diag = array_ops.matrix_diag_part(mat)
    self.assertEqual((3,), mat_diag.get_shape())
    self.assertAllEqual(mat_diag.eval(), v)

Author: JonathanRaiman | Project: tensorflow | Lines: 7 | Source: diag_op_test.py
Example 9: testCovarianceFromSampling

def testCovarianceFromSampling(self):
  alpha = np.array([[1., 2, 3],
                    [2.5, 4, 0.01]], dtype=np.float32)
  with self.test_session() as sess:
    dist = dirichlet_lib.Dirichlet(alpha)  # batch_shape=[2], event_shape=[3]
    x = dist.sample(int(250e3), seed=1)
    sample_mean = math_ops.reduce_mean(x, 0)
    x_centered = x - sample_mean[None, ...]
    sample_cov = math_ops.reduce_mean(math_ops.matmul(
        x_centered[..., None], x_centered[..., None, :]), 0)
    sample_var = array_ops.matrix_diag_part(sample_cov)
    sample_stddev = math_ops.sqrt(sample_var)
    [
        sample_mean_,
        sample_cov_,
        sample_var_,
        sample_stddev_,
        analytic_mean,
        analytic_cov,
        analytic_var,
        analytic_stddev,
    ] = sess.run([
        sample_mean,
        sample_cov,
        sample_var,
        sample_stddev,
        dist.mean(),
        dist.covariance(),
        dist.variance(),
        dist.stddev(),
    ])
    self.assertAllClose(sample_mean_, analytic_mean, atol=0., rtol=0.04)
    self.assertAllClose(sample_cov_, analytic_cov, atol=0., rtol=0.06)
    self.assertAllClose(sample_var_, analytic_var, atol=0., rtol=0.03)
    self.assertAllClose(sample_stddev_, analytic_stddev, atol=0., rtol=0.02)

Author: 1000sprites | Project: tensorflow | Lines: 35 | Source: dirichlet_test.py
Example 10: logdet

def logdet(matrix, name=None):
  """Computes log of the determinant of a hermitian positive definite matrix.

  ```python
  # Compute the determinant of a matrix while reducing the chance of over- or
  # underflow:
  A = ...  # shape 10 x 10
  det = tf.exp(tf.logdet(A))  # scalar
  ```

  Args:
    matrix: A `Tensor`. Must be `float16`, `float32`, `float64`, `complex64`,
      or `complex128` with shape `[..., M, M]`.
    name: A name to give this `Op`. Defaults to `logdet`.

  Returns:
    The natural log of the determinant of `matrix`.

  @compatibility(numpy)
  Equivalent to numpy.linalg.slogdet, although no sign is returned since only
  hermitian positive definite matrices are supported.
  @end_compatibility
  """
  # This uses the property that log det(A) = 2*sum(log(real(diag(C))))
  # where C is the cholesky decomposition of A.
  with ops.name_scope(name, 'logdet', [matrix]):
    chol = gen_linalg_ops.cholesky(matrix)
    return 2.0 * math_ops.reduce_sum(
        math_ops.log(math_ops.real(array_ops.matrix_diag_part(chol))),
        reduction_indices=[-1])

Author: AndrewTwinz | Project: tensorflow | Lines: 30 | Source: linalg_impl.py
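The identity the implementation relies on, log det(A) = 2 * sum(log(diag(chol(A)))), is easy to verify against numpy.linalg.slogdet. The following check is ours, not part of the TensorFlow source:

import numpy as np

# Build a random symmetric positive definite matrix.
rng = np.random.RandomState(0)
b = rng.randn(10, 10)
a = b @ b.T + 10. * np.eye(10)

chol = np.linalg.cholesky(a)
logdet_via_chol = 2.0 * np.sum(np.log(np.diag(chol)))
sign, logdet_ref = np.linalg.slogdet(a)
assert sign > 0
assert np.allclose(logdet_via_chol, logdet_ref)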
Example 11: _forward_log_det_jacobian

def _forward_log_det_jacobian(self, x):
  # We formulate the Jacobian with respect to the flattened matrices
  # `vec(x)` and `vec(y)`. Suppose for notational convenience that
  # the first `n` entries of `vec(x)` are the diagonal of `x`, and
  # the remaining `n**2-n` entries are the off-diagonals in
  # arbitrary order. Then the Jacobian is a block-diagonal matrix,
  # with the Jacobian of the diagonal bijector in the first block,
  # and the identity Jacobian for the remaining entries (since this
  # bijector acts as the identity on non-diagonal entries):
  #
  # J_vec(x) (vec(y)) =
  # -------------------------------
  # | J_diag(x) (diag(y))    0    |  n entries
  # |                             |
  # |         0              I    |  n**2-n entries
  # -------------------------------
  #           n          n**2-n
  #
  # Since the log-det of the second (identity) block is zero, the
  # overall log-det-jacobian is just the log-det of the first block,
  # from the diagonal bijector.
  #
  # Note that for elementwise operations (exp, softplus, etc) the
  # first block of the Jacobian will itself be a diagonal matrix,
  # but our implementation does not require this to be true.
  return self._diag_bijector.forward_log_det_jacobian(
      array_ops.matrix_diag_part(x), event_ndims=1)

Author: Ajaycs99 | Project: tensorflow | Lines: 27 | Source: transform_diagonal.py
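The block-diagonal argument in the comment can be checked numerically. The sketch below (ours) uses exp as the diagonal bijector and a finite-difference Jacobian of the flattened map vec(x) -> vec(y); the log-determinant collapses to the diagonal block's contribution, which for exp is sum(diag(x)):

import numpy as np

def transform(x):
    # Apply exp to the diagonal, leave off-diagonal entries unchanged.
    y = x.copy()
    np.fill_diagonal(y, np.exp(np.diag(x)))
    return y

x = np.random.RandomState(1).randn(3, 3)
eps = 1e-6
n2 = x.size
jac = np.zeros((n2, n2))
for k in range(n2):
    dx = np.zeros(n2)
    dx[k] = eps
    jac[:, k] = (transform(x + dx.reshape(3, 3)) - transform(x)).ravel() / eps

# log|det J| equals the diagonal bijector's log-det: sum(diag(x)) for exp.
assert np.allclose(np.log(abs(np.linalg.det(jac))), np.diag(x).sum(), atol=1e-4)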
Example 12: entropy_matched_cauchy_scale

def entropy_matched_cauchy_scale(covariance):
  """Approximates a similar Cauchy distribution given a covariance matrix.

  Since Cauchy distributions do not have moments, entropy matching provides one
  way to set a Cauchy's scale parameter in a way that provides a similar
  distribution. The effect is dividing the standard deviation of an independent
  Gaussian by a constant very near 3.

  To set the scale of the Cauchy distribution, we first select the diagonals of
  `covariance`. Since this ignores cross terms, it overestimates the entropy of
  the Gaussian. For each of these variances, we solve for the Cauchy scale
  parameter which gives the same entropy as the Gaussian with that
  variance. This means setting the (univariate) Gaussian entropy

      0.5 * ln(2 * variance * pi * e)

  equal to the Cauchy entropy

      ln(4 * pi * scale)

  Solving, we get scale = sqrt(variance * (e / (8 pi))).

  Args:
    covariance: A [batch size x N x N] batch of covariance matrices to produce
        Cauchy scales for.
  Returns:
    A [batch size x N] set of Cauchy scale parameters for each part of the
    batch and each dimension of the input Gaussians.
  """
  return math_ops.sqrt(math.e / (8. * math.pi) *
                       array_ops.matrix_diag_part(covariance))

Author: AutumnQYN | Project: tensorflow | Lines: 27 | Source: math_utils.py
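The algebra in the docstring checks out: substituting scale = sqrt(variance * e / (8 * pi)) into the Cauchy entropy reproduces the Gaussian entropy exactly, and the implied divisor on the standard deviation is sqrt(8 * pi / e) ≈ 3.04. A quick check (ours):

import math

variance = 2.5  # arbitrary positive variance; any value works
scale = math.sqrt(variance * math.e / (8. * math.pi))

gaussian_entropy = 0.5 * math.log(2. * variance * math.pi * math.e)
cauchy_entropy = math.log(4. * math.pi * scale)
assert abs(gaussian_entropy - cauchy_entropy) < 1e-12

print(math.sqrt(variance) / scale)  # ~3.04, the "constant very near 3"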
Example 13: _to_dense

def _to_dense(self):
  normalized_axis = self.reflection_axis / linalg.norm(
      self.reflection_axis, axis=-1, keepdims=True)
  mat = normalized_axis[..., array_ops.newaxis]
  matrix = -2 * math_ops.matmul(mat, mat, adjoint_b=True)
  return array_ops.matrix_set_diag(
      matrix, 1. + array_ops.matrix_diag_part(matrix))

Author: aritratony | Project: tensorflow | Lines: 7 | Source: linear_operator_householder.py
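The two steps above build the Householder reflection H = I - 2 v v^T for a unit reflection axis v: the matmul forms -2 v v^T, and matrix_set_diag adds the identity by writing 1 + diag back onto the diagonal. A NumPy sketch (ours) of the defining properties:

import numpy as np

v = np.array([1., 2., 2.])
v = v / np.linalg.norm(v)            # normalized reflection axis
h = np.eye(3) - 2. * np.outer(v, v)  # the matrix the code assembles

assert np.allclose(h, h.T)            # symmetric
assert np.allclose(h @ h, np.eye(3))  # involutory: applying twice is identity
assert np.allclose(h @ v, -v)         # reflects the axis itself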
Example 14: matrix_diag_transform

def matrix_diag_transform(matrix, transform=None, name=None):
  """Transform diagonal of [batch-]matrix, leave rest of matrix unchanged.

  Create a trainable covariance defined by a Cholesky factor:

  ```python
  # Transform network layer into 2 x 2 array.
  matrix_values = tf.contrib.layers.fully_connected(activations, 4)
  matrix = tf.reshape(matrix_values, (batch_size, 2, 2))

  # Make the diagonal positive. If the upper triangle was zero, this would be a
  # valid Cholesky factor.
  chol = matrix_diag_transform(matrix, transform=tf.nn.softplus)

  # OperatorPDCholesky ignores the upper triangle.
  operator = OperatorPDCholesky(chol)
  ```

  Example of heteroskedastic 2-D linear regression.

  ```python
  # Get a trainable Cholesky factor.
  matrix_values = tf.contrib.layers.fully_connected(activations, 4)
  matrix = tf.reshape(matrix_values, (batch_size, 2, 2))
  chol = matrix_diag_transform(matrix, transform=tf.nn.softplus)

  # Get a trainable mean.
  mu = tf.contrib.layers.fully_connected(activations, 2)

  # This is a fully trainable multivariate normal!
  dist = tf.contrib.distributions.MVNCholesky(mu, chol)

  # Standard log loss. Minimizing this will "train" mu and chol, and then dist
  # will be a distribution predicting labels as multivariate Gaussians.
  loss = -1 * tf.reduce_mean(dist.log_prob(labels))
  ```

  Args:
    matrix: Rank `R` `Tensor`, `R >= 2`, where the last two dimensions are
      equal.
    transform: Element-wise function mapping `Tensors` to `Tensors`. To
      be applied to the diagonal of `matrix`. If `None`, `matrix` is returned
      unchanged. Defaults to `None`.
    name: A name to give created ops.
      Defaults to "matrix_diag_transform".

  Returns:
    A `Tensor` with same shape and `dtype` as `matrix`.
  """
  with ops.name_scope(name, "matrix_diag_transform", [matrix]):
    matrix = ops.convert_to_tensor(matrix, name="matrix")
    if transform is None:
      return matrix
    # Replace the diag with transformed diag.
    diag = array_ops.matrix_diag_part(matrix)
    transformed_diag = transform(diag)
    transformed_mat = array_ops.matrix_set_diag(matrix, transformed_diag)
    return transformed_mat

Author: Jackhuang945 | Project: tensorflow | Lines: 59 | Source: distribution_util.py
Example 15: sign_magnitude_positive_definite

def sign_magnitude_positive_definite(
    raw, off_diagonal_scale=0., overall_scale=0.):
  """Constructs a positive definite matrix from an unconstrained input matrix.

  We want to keep the whole matrix on a log scale, but also allow off-diagonal
  elements to be negative, so the sign of off-diagonal elements is modeled
  separately from their magnitude (using the lower and upper triangles
  respectively). Specifically:

  for i < j, we have:
    output_cholesky[i, j] = raw[j, i] / (abs(raw[j, i]) + 1) *
        exp((off_diagonal_scale + overall_scale + raw[i, j]) / 2)

  output_cholesky[i, i] = exp((raw[i, i] + overall_scale) / 2)

  output = output_cholesky^T * output_cholesky

  where raw, off_diagonal_scale, and overall_scale are
  un-constrained real-valued variables. The resulting values are stable
  around zero due to the exponential (and the softsign keeps the function
  smooth).

  Args:
    raw: A [..., M, M] Tensor.
    off_diagonal_scale: A scalar or [...] shaped Tensor controlling the
        relative scale of off-diagonal values in the output matrix.
    overall_scale: A scalar or [...] shaped Tensor controlling the overall
        scale of the output matrix.
  Returns:
    The `output` matrix described above, a [..., M, M] positive definite
    matrix.
  """
  raw = ops.convert_to_tensor(raw)
  diagonal = array_ops.matrix_diag_part(raw)
  def _right_pad_with_ones(tensor, target_rank):
    # Allow broadcasting even if overall_scale and off_diagonal_scale have
    # batch dimensions.
    tensor = ops.convert_to_tensor(tensor, dtype=raw.dtype.base_dtype)
    return array_ops.reshape(tensor,
                             array_ops.concat(
                                 [
                                     array_ops.shape(tensor), array_ops.ones(
                                         [target_rank - array_ops.rank(tensor)],
                                         dtype=target_rank.dtype)
                                 ],
                                 axis=0))
  # We divide the log values by 2 to compensate for the squaring that happens
  # when transforming Cholesky factors into positive definite matrices.
  sign_magnitude = (gen_math_ops.exp(
      (raw + _right_pad_with_ones(off_diagonal_scale, array_ops.rank(raw)) +
       _right_pad_with_ones(overall_scale, array_ops.rank(raw))) / 2.) *
                    nn.softsign(array_ops.matrix_transpose(raw)))
  sign_magnitude.set_shape(raw.get_shape())
  cholesky_factor = array_ops.matrix_set_diag(
      input=array_ops.matrix_band_part(sign_magnitude, 0, -1),
      diagonal=gen_math_ops.exp((diagonal + _right_pad_with_ones(
          overall_scale, array_ops.rank(diagonal))) / 2.))
  return math_ops.matmul(cholesky_factor, cholesky_factor, transpose_a=True)

Author: AutumnQYN | Project: tensorflow | Lines: 58 | Source: math_utils.py
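A non-batched NumPy re-implementation sketch (ours, with softsign(x) = x / (1 + |x|)) showing that the construction indeed produces a symmetric positive definite matrix:

import numpy as np

def softsign(x):
    return x / (1. + np.abs(x))

rng = np.random.RandomState(0)
raw = rng.randn(4, 4)
off_diagonal_scale, overall_scale = 0., 0.

# Upper triangle: magnitude from exp(raw / 2), sign from softsign(raw^T).
sign_magnitude = (np.exp((raw + off_diagonal_scale + overall_scale) / 2.) *
                  softsign(raw.T))
chol = np.triu(sign_magnitude, k=1)
np.fill_diagonal(chol, np.exp((np.diag(raw) + overall_scale) / 2.))
output = chol.T @ chol

# A positive diagonal on the Cholesky factor guarantees positive definiteness.
assert np.allclose(output, output.T)
assert np.all(np.linalg.eigvalsh(output) > 0)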
Example 16: _log_abs_determinant

def _log_abs_determinant(self):
  logging.warn(
      "Using (possibly slow) default implementation of determinant."
      " Requires conversion to a dense matrix and O(N^3) operations.")
  if self._can_use_cholesky():
    diag = array_ops.matrix_diag_part(self._get_cached_chol())
    return 2 * math_ops.reduce_sum(math_ops.log(diag), reduction_indices=[-1])
  abs_det = math_ops.abs(self.determinant())
  return math_ops.log(abs_det)

Author: 1000sprites | Project: tensorflow | Lines: 9 | Source: linear_operator.py
Example 17: _GradWithInverseL

def _GradWithInverseL(l, l_inverse, grad):
  middle = math_ops.matmul(l, grad, adjoint_a=True)
  middle = array_ops.matrix_set_diag(middle,
                                     0.5 * array_ops.matrix_diag_part(middle))
  middle = array_ops.matrix_band_part(middle, -1, 0)
  grad_a = math_ops.matmul(
      math_ops.matmul(l_inverse, middle, adjoint_a=True), l_inverse)
  grad_a += math_ops.conj(array_ops.matrix_transpose(grad_a))
  return grad_a * 0.5

Author: AbhinavJain13 | Project: tensorflow | Lines: 9 | Source: cholesky_op_test.py
Example 18: _log_abs_determinant

def _log_abs_determinant(self):
  logging.warn(
      "Using (possibly slow) default implementation of determinant."
      " Requires conversion to a dense matrix and O(N^3) operations.")
  if self._can_use_cholesky():
    diag = array_ops.matrix_diag_part(linalg_ops.cholesky(self.to_dense()))
    return 2 * math_ops.reduce_sum(math_ops.log(diag), axis=[-1])
  _, log_abs_det = linalg.slogdet(self.to_dense())
  return log_abs_det

Author: aritratony | Project: tensorflow | Lines: 9 | Source: linear_operator.py
Example 19: testRectangularBatch

def testRectangularBatch(self):
  with self.session(use_gpu=True):
    v_batch = np.array([[1.0, 2.0], [4.0, 5.0]])
    mat_batch = np.array([[[1.0, 0.0, 0.0], [0.0, 2.0, 0.0]],
                          [[4.0, 0.0, 0.0], [0.0, 5.0, 0.0]]])
    self.assertEqual(mat_batch.shape, (2, 2, 3))
    mat_batch_diag = array_ops.matrix_diag_part(mat_batch)
    self.assertEqual((2, 2), mat_batch_diag.get_shape())
    self.assertAllEqual(mat_batch_diag.eval(), v_batch)

Author: JonathanRaiman | Project: tensorflow | Lines: 9 | Source: diag_op_test.py
Example 20: _sqrt_log_det

def _sqrt_log_det(self):
  # The matrix determinant lemma states:
  # det(M + VDV^T) = det(D^{-1} + V^T M^{-1} V) * det(D) * det(M)
  #                = det(C) * det(D) * det(M)
  #
  # Here we compute the Cholesky factor of "C", then pass the result on.
  diag_chol_c = array_ops.matrix_diag_part(
      self._chol_capacitance(batch_mode=False))
  return self._sqrt_log_det_core(diag_chol_c)

Author: ComeOnGetMe | Project: tensorflow | Lines: 9 | Source: operator_pd_vdvt_update.py
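The matrix determinant lemma quoted in the comment is easy to confirm numerically; the snippet below (ours) uses arbitrary positive definite M and D:

import numpy as np

rng = np.random.RandomState(0)
m = rng.randn(4, 4)
m = m @ m.T + 4. * np.eye(4)   # positive definite M
v = rng.randn(4, 2)
d = np.diag([0.5, 2.0])        # positive definite diagonal D

lhs = np.linalg.det(m + v @ d @ v.T)
capacitance = np.linalg.inv(d) + v.T @ np.linalg.inv(m) @ v  # the "C" above
rhs = np.linalg.det(capacitance) * np.linalg.det(d) * np.linalg.det(m)
assert np.allclose(lhs, rhs)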
Note: The tensorflow.python.ops.array_ops.matrix_diag_part examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by various authors; copyright in the code belongs to the original authors, and distribution and use are subject to each project's license. Do not reproduce without permission.